diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000000..774e6421dd3
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,2 @@
+obc-peer
+.git
diff --git a/.gitattributes b/.gitattributes
new file mode 100755
index 00000000000..1fe342eed2e
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,11 @@
+*.sh text eol=lf
+*.go text eol=lf
+*.yaml text eol=lf
+*.yml text eol=lf
+*.md text eol=lf
+*.json text eol=lf
+*.proto text eol=lf
+*.py text eol=lf
+*.js text eol=lf
+*.txt text eol=lf
+LICENSE text eol=lf
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 00000000000..b2192982bbd
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,7 @@
+
+
+## Description
+
+
+## Describe How to Reproduce
+
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000000..f360b70097b
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,26 @@
+
+
+## Description
+
+
+## Motivation and Context
+
+
+Fixes #
+
+## How Has This Been Tested?
+
+
+
+## Checklist:
+
+
+
+- [ ] I have added a [Signed-off-by](https://github.com/hyperledger/fabric/blob/master/CONTRIBUTING.md#legal-stuff).
+- [ ] I have either added documentation to cover my changes or this change requires no new documentation.
+- [ ] I have either added unit tests to cover my changes or this change requires no new tests.
+- [ ] I have run [golint](https://github.com/golang/lint) and have fixed valid warnings in code I have added or modified. This tool generates false positives so you may choose to ignore some warnings. The goal is clean, consistent, and readable code.
+
+
+
+Signed-off-by:
diff --git a/.gitignore b/.gitignore
new file mode 100755
index 00000000000..c8e327b6e60
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,27 @@
+# Local build binaries
+node-cli/node-cli
+*.pyc
+/build/*
+/bin
+.idea
+*.iml
+.DS_Store
+tags
+.tags
+.vagrant/
+/build
+# Emacs backup files
+*~
+*#
+.#*
+# bddtest log files
+*.log
+# bddtest coverage files
+bddtests/coverage
+*.cov
+# Makefile dummy artifacts
+.*-dummy
+# go-carpet output files
+go-carpet-coverage*
+# make node-sdk copied files
+sdk/node/lib/protos/*
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 00000000000..990fe42b748
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,66 @@
+language: go
+go:
+ - 1.6
+sudo: required
+services:
+ - docker
+env:
+ - TEST_TARGET=unit-test
+ - TEST_TARGET=behave
+
+before_install:
+
+ - echo "Starting Docker Daemon "
+ - |
+ export TR_PULL_REQUEST="$TRAVIS_PULL_REQUEST" && export GIT_USER="$TRAVIS_REPO_SLUG"
+ USER_NAME="$(echo $GIT_USER | cut -d '/' -f 1)" && REPO_NAME="$(echo $GIT_USER | cut -d '/' -f 2)"
+ ip="$(ifconfig docker0 | grep "inet addr:" | awk '{print $2}' | cut -d ':' -f 2)"
+ port="$(ps -ef | grep docker | awk '{print $11}' | cut -d ':' -f 3)"
+ sudo stop docker
+ sudo docker daemon -H tcp://0.0.0.0:$port -H unix:///var/run/docker.sock >> dockerlogfile.log 2>&1 &
+
+install:
+
+ - echo " INSTALLING DEPENDENCIES "
+ - |
+ cd $HOME/gopath/src/github.com/$USER_NAME/$REPO_NAME/scripts/provision/ && chmod +x host.sh && sudo ./host.sh
+ echo " Installing Rocks DB, g++ compilers & Dependencies "
+ sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && sudo apt-get -qq update && sudo apt-get -qq install g++-4.8 && sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 90
+ sudo apt-get install build-essential -y
+ sudo apt-get install -y libsnappy-dev zlib1g-dev libbz2-dev
+ cd /tmp
+ git clone --branch v4.1 --single-branch --depth 1 https://github.com/facebook/rocksdb.git
+ cd rocksdb
+ make shared_lib
+ sudo INSTALL_PATH=/usr/local make install-shared
+ sudo ldconfig
+
+before_script:
+
+ - echo " CREATING BASE IMAGE "
+ - cd $HOME/gopath/src/github.com/$USER_NAME/$REPO_NAME/scripts && chmod +x foldercopy.sh && ./foldercopy.sh $TR_PULL_REQUEST $USER_NAME $REPO_NAME
+ - sudo rm -rf /var/hyperledger/ && sudo mkdir /var/hyperledger/ && sudo chown $USER:$USER /var/hyperledger
+ - cd /$HOME/gopath/src/github.com/hyperledger/fabric
+ - make linter
+
+script:
+
+ - echo "Executing Tests"
+ - cd $HOME/gopath/src/github.com/hyperledger/fabric
+ - sed -i -e 's/172.17.0.1:2375\b/'"$ip:$port"'/g' $HOME/gopath/src/github.com/hyperledger/fabric/bddtests/compose-defaults.yml
+ - export BEHAVE_OPTS="-D logs=Y -o testsummary.log" #Defined to both jobs.
+ - make $TEST_TARGET
+
+after_failure:
+
+ - |
+ echo "Click below links to view behave container log files"
+ cd $HOME/gopath/src/github.com/hyperledger/fabric
+ chmod +x scripts/containerlogs.sh
+ sudo ./scripts/containerlogs.sh
+
+notifications:
+
+ slack: 'hyperledgerproject:azMP8Mw3cfGigXkqi5RujZXr'
+ on_success: always
+ on_failure: always
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000000..8fb27c42aed
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,120 @@
+### Welcome
+
+We welcome contributions to the Hyperledger Project in many forms, and there's always plenty to do!
+
+First things first, please review the Hyperledger Project's [Code of Conduct](https://github.com/hyperledger/hyperledger/wiki/Hyperledger-Project-Code-of-Conduct) before participating. It is important that we keep things civil.
+
+### Getting help
+If you are looking for something to work on, or need some expert assistance in debugging a problem or working out a fix to an issue, our [community](https://www.hyperledger.org/community) is always eager to help. We hang out on [Slack](https://hyperledgerproject.slack.com/), IRC (#hyperledger on freenode.net) and the [mailing lists](http://lists.hyperledger.org/). Most of us don't bite ;-) and will be glad to help.
+
+### Requirements and Use Cases
+We have a [Requirements WG](https://github.com/hyperledger/hyperledger/wiki/Requirements-WG) that is documenting use cases and from those use cases deriving requirements. If you are interested in contributing to this effort, please feel free to join the discussion in [slack](https://hyperledgerproject.slack.com/messages/requirements/).
+
+### Reporting bugs
+If you are a user and you find a bug, please submit an [issue](https://github.com/hyperledger/fabric/issues). Please try to provide sufficient information for someone else to reproduce the issue. One of the project's maintainers should respond to your issue within 24 hours. If not, please bump the issue and request that it be reviewed.
+
+### Fixing issues and working stories
+Review the [issues list](https://github.com/hyperledger/fabric/issues) and find something that interests you. You could also check the ["help wanted"](https://github.com/hyperledger/fabric/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) list. It is wise to start with something relatively straightforward and achievable. Usually there will be a comment in the issue that indicates whether someone has already self-assigned the issue. If no one has already taken it, then add a comment assigning the issue to yourself, e.g.: ```I'll work on this issue.```. Please be considerate and rescind the offer in comments if you cannot finish in a reasonable time, or add a comment saying that you are still actively working the issue if you need a little more time.
+
+We are using the [GitHub Flow](https://guides.github.com/introduction/flow/) process to manage code contributions. If you are unfamiliar, please review that link before proceeding.
+
+To work on something, whether a new feature or a bugfix:
+ 1. Create a [fork](https://help.github.com/articles/fork-a-repo/) (if you haven't already)
+
+ 2. Clone it locally
+ ```
+ git clone https://github.com/yourid/fabric.git
+ ```
+
+ 3. Add the upstream repository as a remote
+ ```
+ git remote add upstream https://github.com/hyperledger/fabric.git
+ ```
+
+ 4. Create a branch
+ Create a descriptively-named branch off of your cloned fork ([more detail here](https://help.github.com/articles/syncing-a-fork/))
+ ```
+ cd fabric
+ git checkout -b issue-nnnn
+ ```
+
+ 5. Commit your code
+
+ Commit to that branch locally, and regularly push your work to the same branch on the server.
+
+ 6. Commit messages
+
+ Commit messages must have a short description no longer than 50 characters followed by a blank line and a longer, more descriptive message that includes reference to issue(s) being addressed so that they will be automatically closed on a merge e.g. ```Closes #1234``` or ```Fixes #1234```.
+
+ 7. Pull Request (PR)
+
+ **Note:** Each source file must include a license header for the Apache Software License 2.0. A template of that header can be found [here](https://github.com/hyperledger/fabric/blob/master/docs/dev-setup/headers.txt).
+
+ When you need feedback or help, or you think the branch is ready for merging, open a pull request (make sure you have first successfully built and tested with the [Unit and Behave Tests](docs/dev-setup/install.md#3-test)).
+
+ _Note: if your PR does not merge cleanly, use ```git rebase master``` in your feature branch to update your pull request rather than using ```git merge master```_.
+
+ 8. Did we mention tests? All code changes should be accompanied by new or modified tests.
+
+ 9. Continuous Integration (CI): Be sure to check [Travis](https://travis-ci.org/) or the Slack [#fabric-ci-status](https://hyperledgerproject.slack.com/messages/fabric-ci-status) channel for status of your build. You can re-trigger a build on [Jenkins](https://jenkins.io/) with a PR comment containing `reverify jenkins`.
+
+ **Note:** While some underlying work to migrate the build system from Travis to Jenkins is taking place, you can ask the [maintainers](https://github.com/hyperledger/fabric/blob/master/MAINTAINERS.txt) to re-trigger a Travis build for your PR, either by adding a comment to the PR or on the [#fabric-ci-status](https://hyperledgerproject.slack.com/messages/fabric-ci-status) Slack channel.
+
+ 10. Any code changes that affect documentation should be accompanied by corresponding changes (or additions) to the documentation and tests. This will ensure that if the merged PR is reversed, all traces of the change will be reversed as well.
+
+After your Pull Request (PR) has been reviewed and signed off, a maintainer will merge it into the master branch.
+
+## Coding guidelines
+
+### Coding Golang
+- We code in Go™ and strictly follow the [best practices](http://golang.org/doc/effective_go.html)
+and will not accept any deviations. You must run the following tools against your Go code and fix all errors and warnings:
+ - [golint](https://github.com/golang/lint)
+ - [go vet](https://golang.org/cmd/vet/)
+ - [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports)
+
+ ## Generating gRPC code
+
+ If you modify any `.proto` files, run the following command to generate/update the respective `.pb.go` files.
+
+ ```
+ cd $GOPATH/src/github.com/hyperledger/fabric
+ make protos
+ ```
+
+ ## Adding or updating Go packages
+
+ The Hyperledger Fabric Project uses Go 1.6 vendoring for package management. This means that all required packages reside in the `vendor` folder within the fabric project. Go will use packages in this folder instead of the GOPATH when the `go install` or `go build` commands are executed. To manage the packages in the `vendor` folder, we use [Govendor](https://github.com/kardianos/govendor), which is installed in the Vagrant environment. The following commands can be used for package management:
+
+ ```
+ # Add external packages.
+ govendor add +external
+
+ # Add a specific package.
+ govendor add github.com/kardianos/osext
+
+ # Update vendor packages.
+ govendor update +vendor
+
+ # Revert back to normal GOPATH packages.
+ govendor remove +vendor
+
+ # List package.
+ govendor list
+ ```
+
+### Becoming a maintainer
+This project is managed under an open governance model as described in our [charter](https://www.hyperledger.org/about/charter). Projects or sub-projects will be led by a set of maintainers. New projects can designate an initial set of maintainers that will be approved by the Technical Steering Committee when the project is first approved. The project's maintainers will, from time-to-time, consider adding a new maintainer. An existing maintainer will post a pull request to the [MAINTAINERS.txt](MAINTAINERS.txt) file. If a majority of the maintainers concur in the comments, the pull request is then merged and the individual becomes a maintainer.
+
+### Legal stuff
+
+**Note:** Each source file must include a license header for the Apache Software License 2.0. A template of that header can be found [here](https://github.com/hyperledger/fabric/blob/master/docs/dev-setup/headers.txt).
+
+We have tried to make it as easy as possible to make contributions. This applies to how we handle the legal aspects of contribution. We use the same approach—the [Developer's Certificate of Origin 1.1 (DCO)](docs/biz/DCO1.1.txt)—that the Linux® Kernel [community](http://elinux.org/Developer_Certificate_Of_Origin) uses to manage code contributions.
+We simply ask that when submitting a pull request, the developer must include a sign-off statement in the pull request description.
+
+Here is an example Signed-off-by line, which indicates that the submitter accepts the DCO:
+
+```
+Signed-off-by: John Doe
+```
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000000..8f71f43fee3
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/MAINTAINERS.txt b/MAINTAINERS.txt
new file mode 100644
index 00000000000..d06ad68f664
--- /dev/null
+++ b/MAINTAINERS.txt
@@ -0,0 +1,11 @@
+Maintainers
+
+Binh Nguyen binhn binhn@us.ibm.com
+Sheehan Anderson srderson sheehan@us.ibm.com
+Tamas Blummer tamasblummer tamas@digitalasset.com
+Robert Fajta rfajta robert@digitalasset.com
+Greg Haskins ghaskins ghaskins@lseg.com
+Jonathan Levi JonathanLevi jonathan@levi.name
+Gabor Hosszu gabre gabor@digitalasset.com
+Simon Schubert corecode sis@zurich.ibm.com
+Chris Ferris christo4ferris chrisfer@us.ibm.com
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000000..55c3a611c80
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,206 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# -------------------------------------------------------------
+# This makefile defines the following targets
+#
+# - all (default) - builds all targets and runs all tests/checks
+# - checks - runs all tests/checks
+# - peer - builds the fabric peer binary
+# - membersrvc - builds the membersrvc binary
+# - unit-test - runs the go-test based unit tests
+# - behave - runs the behave test
+#   - behave-deps - ensures pre-requisites are available for running behave manually
+# - gotools - installs go tools like golint
+# - linter - runs all code checks
+# - images[-clean] - ensures all docker images are available[/cleaned]
+# - peer-image[-clean] - ensures the peer-image is available[/cleaned] (for behave, etc)
+# - membersrvc-image[-clean] - ensures the membersrvc-image is available[/cleaned] (for behave, etc)
+# - protos - generate all protobuf artifacts based on .proto files
+# - node-sdk - builds the node.js client sdk
+# - node-sdk-unit-tests - runs the node.js client sdk unit tests
+# - clean - cleans the build area
+# - dist-clean - superset of 'clean' that also removes persistent state
+
+PROJECT_NAME=hyperledger/fabric
+PKGNAME = github.com/$(PROJECT_NAME)
+CGO_FLAGS = CGO_CFLAGS=" " CGO_LDFLAGS="-lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy"
+UID = $(shell id -u)
+
+EXECUTABLES = go docker git
+K := $(foreach exec,$(EXECUTABLES),\
+ $(if $(shell which $(exec)),some string,$(error "No $(exec) in PATH: Check dependencies")))
+
+# SUBDIRS are components that have their own Makefiles that we can invoke
+SUBDIRS = gotools sdk/node
+SUBDIRS:=$(strip $(SUBDIRS))
+
+# Make our baseimage depend on any changes to images/base or scripts/provision
+BASEIMAGE_RELEASE = $(shell cat ./images/base/release)
+BASEIMAGE_DEPS = $(shell git ls-files images/base scripts/provision)
+
+PROJECT_FILES = $(shell git ls-files)
+IMAGES = base src ccenv peer membersrvc
+
+all: peer membersrvc checks
+
+checks: linter unit-test behave
+
+.PHONY: $(SUBDIRS)
+$(SUBDIRS):
+ cd $@ && $(MAKE)
+
+.PHONY: peer
+peer: build/bin/peer
+peer-image: build/image/peer/.dummy
+
+.PHONY: membersrvc
+membersrvc: build/bin/membersrvc
+membersrvc-image: build/image/membersrvc/.dummy
+
+unit-test: peer-image gotools
+ @./scripts/goUnitTests.sh
+
+.PHONY: images
+images: $(patsubst %,build/image/%/.dummy, $(IMAGES))
+
+behave-deps: images peer
+behave: behave-deps
+ @echo "Running behave tests"
+ @cd bddtests; behave $(BEHAVE_OPTS)
+
+linter: gotools
+ @echo "LINT: Running code checks.."
+ @echo "Running go vet"
+ go vet ./consensus/...
+ go vet ./core/...
+ go vet ./events/...
+ go vet ./examples/...
+ go vet ./membersrvc/...
+ go vet ./peer/...
+ go vet ./protos/...
+ @echo "Running goimports"
+ @./scripts/goimports.sh
+
+# We (re)build protoc-gen-go from within docker context so that
+# we may later inject the binary into a different docker environment
+# This is necessary since we cannot guarantee that binaries built
+# on the host natively will be compatible with the docker env.
+%/bin/protoc-gen-go: build/image/base/.dummy Makefile
+ @echo "Building $@"
+ @mkdir -p $(@D)
+ @docker run -i \
+ --user=$(UID) \
+ -v $(abspath vendor/github.com/golang/protobuf):/opt/gopath/src/github.com/golang/protobuf \
+ -v $(abspath $(@D)):/opt/gopath/bin \
+ hyperledger/fabric-baseimage go install github.com/golang/protobuf/protoc-gen-go
+
+%/bin/chaintool:
+ @echo "Installing chaintool"
+ @cp devenv/tools/chaintool $@
+
+# We (re)build a package within a docker context but persist the $GOPATH/pkg
+# directory so that subsequent builds are faster
+build/docker/bin/%: build/image/src/.dummy $(PROJECT_FILES)
+ $(eval TARGET = ${patsubst build/docker/bin/%,%,${@}})
+ @echo "Building $@"
+ @mkdir -p build/docker/bin build/docker/pkg
+ @docker run -i \
+ --user=$(UID) \
+ -v $(abspath build/docker/bin):/opt/gopath/bin \
+ -v $(abspath build/docker/pkg):/opt/gopath/pkg \
+ hyperledger/fabric-src go install github.com/hyperledger/fabric/$(TARGET)
+
+build/bin:
+ mkdir -p $@
+
+# Both peer and peer-image depend on ccenv-image
+build/bin/peer: build/image/ccenv/.dummy
+build/image/peer/.dummy: build/image/ccenv/.dummy
+build/image/peer/.dummy: build/docker/bin/examples/events/block-listener/
+
+build/bin/%: build/image/base/.dummy $(PROJECT_FILES)
+ @mkdir -p $(@D)
+ @echo "$@"
+ $(CGO_FLAGS) GOBIN=$(abspath $(@D)) go install $(PKGNAME)/$(@F)
+ @echo "Binary available as $@"
+ @touch $@
+
+# Special override for base-image.
+build/image/base/.dummy: $(BASEIMAGE_DEPS)
+ @echo "Building docker base-image"
+ @mkdir -p $(@D)
+ @./scripts/provision/docker.sh $(BASEIMAGE_RELEASE)
+ @touch $@
+
+# Special override for src-image
+build/image/src/.dummy: build/image/base/.dummy $(PROJECT_FILES)
+ @echo "Building docker src-image"
+ @mkdir -p $(@D)
+ @cat images/src/Dockerfile.in > $(@D)/Dockerfile
+ @git ls-files | tar -jcT - > $(@D)/gopath.tar.bz2
+ docker build -t $(PROJECT_NAME)-src:latest $(@D)
+ @touch $@
+
+# Special override for ccenv-image (chaincode-environment)
+build/image/ccenv/.dummy: build/image/src/.dummy build/image/ccenv/bin/protoc-gen-go build/image/ccenv/bin/chaintool Makefile
+ @echo "Building docker ccenv-image"
+ @cat images/ccenv/Dockerfile.in > $(@D)/Dockerfile
+ docker build -t $(PROJECT_NAME)-ccenv:latest $(@D)
+ @touch $@
+
+# Default rule for image creation
+build/image/%/.dummy: build/image/src/.dummy build/docker/bin/%
+ $(eval TARGET = ${patsubst build/image/%/.dummy,%,${@}})
+ @echo "Building docker $(TARGET)-image"
+ @mkdir -p $(@D)/bin
+ @cat images/app/Dockerfile.in | sed -e 's/_TARGET_/$(TARGET)/g' > $(@D)/Dockerfile
+ cp build/docker/bin/$(TARGET) $(@D)/bin
+ docker build -t $(PROJECT_NAME)-$(TARGET):latest $(@D)
+ @touch $@
+
+.PHONY: protos
+protos: gotools
+ ./devenv/compile_protos.sh
+
+base-image-clean:
+ -docker rmi -f $(PROJECT_NAME)-baseimage
+ -@rm -rf build/image/base ||:
+
+%-image-clean:
+ $(eval TARGET = ${patsubst %-image-clean,%,${@}})
+ -docker rmi -f $(PROJECT_NAME)-$(TARGET)
+ -@rm -rf build/image/$(TARGET) ||:
+
+images-clean: $(patsubst %,%-image-clean, $(IMAGES))
+
+node-sdk: sdk/node
+
+node-sdk-unit-tests: peer membersrvc
+ cd sdk/node && $(MAKE) unit-tests
+
+.PHONY: $(SUBDIRS:=-clean)
+$(SUBDIRS:=-clean):
+ cd $(patsubst %-clean,%,$@) && $(MAKE) clean
+
+.PHONY: clean
+clean: images-clean $(filter-out gotools-clean, $(SUBDIRS:=-clean))
+ -@rm -rf build ||:
+
+.PHONY: dist-clean
+dist-clean: clean gotools-clean
+ -@rm -rf /var/hyperledger/* ||:
diff --git a/README.md b/README.md
new file mode 100644
index 00000000000..c93a211b031
--- /dev/null
+++ b/README.md
@@ -0,0 +1,41 @@
+[](https://travis-ci.org/hyperledger/fabric)
+[](https://goreportcard.com/report/github.com/hyperledger/fabric)
+[](https://godoc.org/github.com/hyperledger/fabric)
+[](http://hyperledger-fabric.readthedocs.io/en/latest/?badge=latest)
+
+# Incubation Notice
+This project is a Hyperledger project in _Incubation_. It was proposed to the community and documented [here](https://goo.gl/RYQZ5N). Information on what _Incubation_ entails can be found in the [Hyperledger Project Lifecycle document](https://goo.gl/4edNRc).
+
+# Hyperledger Fabric
+The fabric is an implementation of blockchain technology, leveraging familiar and proven technologies. It is a modular architecture allowing pluggable implementations of various functions. It features powerful container technology to host any mainstream language for smart contract development.
+
+## Releases
+The fabric releases are documented [here](https://github.com/hyperledger/fabric/wiki/Fabric-Releases). We have just released our first release under the governance of the Hyperledger Project - v0.5-developer-preview.
+
+## Contributing to the project
+We welcome contributions to the Hyperledger Project in many forms. There's always plenty to do! Full details of how to contribute to this project are documented in the [CONTRIBUTING.md](CONTRIBUTING.md) file.
+
+## Maintainers
+The project's [maintainers](MAINTAINERS.txt) are responsible for reviewing and merging all pull requests, and they guide the overall technical direction of the project within the guidelines established by the Hyperledger Project's Technical Steering Committee (TSC).
+
+## Communication
+We use [Hyperledger Slack](https://slack.hyperledger.org/) for communication and Google Hangouts™ for screen sharing between developers.
+
+## Installing the fabric
+[Installation](docs/Setup/Network-setup.md): Describes how to install the blockchain fabric and use project tools.
+
+## Documentation
+Project documentation can be found [here](http://hyperledger-fabric.readthedocs.io/en/latest/).
+
+## Still Have Questions?
+For general purpose questions, please use [StackOverflow](http://stackoverflow.com/questions/tagged/hyperledger).
+
+## License
+The Hyperledger Project uses the [Apache License Version 2.0](LICENSE) software license.
+
+## Related information
+If you are new to the project, you can begin by reviewing the following documents:
+
+ - [Whitepaper WG](https://github.com/hyperledger/hyperledger/wiki/Whitepaper-WG)
+ - [Requirements WG](https://github.com/hyperledger/hyperledger/wiki/Requirements-WG)
+ - [Protocol Specification](docs/protocol-spec.md)
diff --git a/TravisCI_Readme.md b/TravisCI_Readme.md
new file mode 100644
index 00000000000..8da35ff749e
--- /dev/null
+++ b/TravisCI_Readme.md
@@ -0,0 +1,105 @@
+## Continuous Integration Process
+
+Continuous Integration is a development practice that requires developers to integrate code into a shared repository. Each time a developer checks code into the repository, it is verified by an automated build process. This process enables developers to detect build issues early in the build life cycle.
+
+**hyperledger** build process is fully automated using **Travis CI** Continuous Integration tool, which helps in building a real time solution for all the code check-ins, perform Unit and Functional Tests. Once the build execution completes, developer will get build result notifications on slack, GitHub and email (Depending on configuration settings provided in .travis.yml file)
+
+The **Master Repository** can be found at [**hyperledger**](https://github.com/hyperledger/fabric.git).
+
+## Setting up the Continuous Integration Process
+
+- Log in to [GitHub](https://github.com), then fork and clone the [**hyperledger**](https://github.com/hyperledger/fabric.git) project into your *GitHub* account, if you haven't already. If you already have a forked repository in your GitHub account, please pull from the **hyperledger/fabric** master repository so that the updated .travis.yml (`configuration file for Travis CI`) is copied into your repository.
+
+###### Perform **Travis CI** integration in **GitHub**:
+
+- Click on **Settings** tab in forked **fabric** GitHub repository and click on **Webhooks & Services** option. Click on **Add Service** and click on **Travis CI** link in services panel. Provide below details
+
+- User (GitHub User Name)
+- Token (Generate token from profile - settings - Personal access token - click on Generate New Token) - Check on "public_repo" scope and click on generate token button. Copy and paste it in Token field
+- Domain (Enter "notify.travis-ci.org")
+
+- Click on Add Service button
+
+This will enable integration between Travis CI and GitHub for the selected repository. After successful integration, **Travis CI** service will appear in Services menu.
+
+
+
+###### Sync and Enable fabric repository in Travis:
+
+- Browse [Travis CI](http://travis-ci.org) and click on **Sign in with GitHub** button and provide GitHub credentials.
+
+- http://travis-ci.org - for Public Repositories. http://travis-ci.com - for Private repositories.
+
+- After logging in to Travis CI, click the *Accounts* link under your username (available at the top-right corner) and click the **Sync account** button. This syncs and displays all the repositories available to the logged-in user. Next, toggle ON the required repositories. After toggling, refresh the Travis home page and you will see all the selected repositories in the *My Repositories* section of the Travis home page.
+
+- In more options menu, click on **Settings** and enable general settings (**Build only if .travis.yml is present** , **Build Pushes** , **Limit Current jobs** , **Build pull requests**) for the selected repository.
+
+
+
+- Disable **Build Pull Requests** option if you don't want to trigger automation build deployment for any `Pull Requests`.
+
+**Add Build status markdown link in Readme.md file**
+
+- Copy markdown link from Travis CI home page and place it in Readme.md file of your working branch. Follow [Embedding Status Images](https://docs.travis-ci.com/user/status-images) that helps you to setup the build status in Readme.md file.
+
+Note: Please make sure **.travis.yml** , **foldercopy.sh** and **containerlogs.sh** are present with below modifications in the master branch or the working branch in GitHub before performing any ` git push ` operations.
+
+- Change notifications section as per user preferences:
+
+Follow [Travis Notification Settings](https://docs.travis-ci.com/user/notifications) to setup notifications in .travis.yml file.
+
+Repository Owner has to provide slack token. Please get in touch with him/her for your Slack Token.
+
+```
+notifications:
+  slack: <account>:<token>  # ex: slack: openchain:<token>
+ on_success: always
+ on_failure: always
+ email:
+ recipients:
+ - one@example.com
+ - other@example.com
+ on_success: [always|never|change] # default: change
+ on_failure: [always|never|change] # default: always
+```
+
+Now you have completed with Travis CI setup process. If you make any changes in your code and push code to remote repository, Travis CI automatically starts the build process and shows you the build results on your notification settings (Slack, email and on GitHub Readme.md).
+
+
+
+**Build Process includes below steps:**
+
+1. git clone on updated git repository into Travis cloud environment from GitHub.
+2. Install all dependency software and libraries
+3. Perform go build
+4. Start Peer process
+5. Perform unit tests (go tests)
+6. Perform Behave test suite (behave tests)
+7. Provides failed container log files in travis raw log file.
+8. Update slack channel (#fabric-ci-status) with build results.
+
+## More Information/Troubleshooting:
+
+- Developer can skip the Travis CI build process by providing ` [ci skip] ` in the git commit message.
+```
+git commit -m "Ignore build process [ci skip]"
+
+```
+- How to skip Travis build execution for PRs?
+
+ - Travis CI checks latest commit of PR and if the commit message is tagged with [ci skip], Travis CI ignores build process.
+ - This will be useful, when you want to open a pull request early for review but you are not necessarily ready to run the tests right away. Also, you can skip Travis build process for document changes.
+ - Right now, Travis only supports above method to skip build process.
+
+- What is the slack channel to view build results?
+ - We are sending build notifications to hyperledger `#fabric-ci-status` slack channel. (User must join in #fabric-ci-status slack channel to receive build notifications)
+
+- How to restart build without committing any changes to remote GitHub?
+
+ - Apply `git commit --allow-empty -m "Empty commit" ` and do a git push or click on `Restart Job` (only users with push access to repository can do this) button on Travis CI home page.
+
+- Where can I find Build log files?
+ - Click on `RAW log` link on Travis CI home page.
+
+- Where can I find Behave Container log files?
+ - Each container log file link is displayed at the bottom of the RAW log file; click a link to view that container's logs.
diff --git a/bddtests/.behaverc b/bddtests/.behaverc
new file mode 100644
index 00000000000..82605eb229c
--- /dev/null
+++ b/bddtests/.behaverc
@@ -0,0 +1,10 @@
+[behave]
+tags=~@issue_724
+ ~@issue_767
+ ~@issueUtxo
+ ~@issue_477
+ ~@issue_680
+ ~@issue_1207
+ ~@issue_1565
+ ~@issue_RBAC_TCERT_With_Attributes
+ ~@sdk
diff --git a/bddtests/api_pb2.py b/bddtests/api_pb2.py
new file mode 100644
index 00000000000..72c4e0511c5
--- /dev/null
+++ b/bddtests/api_pb2.py
@@ -0,0 +1,229 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: api.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+import fabric_pb2 as fabric__pb2
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='api.proto',
+ package='protos',
+ syntax='proto3',
+ serialized_pb=_b('\n\tapi.proto\x12\x06protos\x1a\x0c\x66\x61\x62ric.proto\x1a\x1bgoogle/protobuf/empty.proto\"\x1d\n\x0b\x42lockNumber\x12\x0e\n\x06number\x18\x01 \x01(\x04\"\x1b\n\nBlockCount\x12\r\n\x05\x63ount\x18\x01 \x01(\x04\x32\x87\x02\n\tOpenchain\x12\x45\n\x11GetBlockchainInfo\x12\x16.google.protobuf.Empty\x1a\x16.protos.BlockchainInfo\"\x00\x12\x38\n\x10GetBlockByNumber\x12\x13.protos.BlockNumber\x1a\r.protos.Block\"\x00\x12=\n\rGetBlockCount\x12\x16.google.protobuf.Empty\x1a\x12.protos.BlockCount\"\x00\x12:\n\x08GetPeers\x12\x16.google.protobuf.Empty\x1a\x14.protos.PeersMessage\"\x00\x62\x06proto3')
+ ,
+ dependencies=[fabric__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+
+
+_BLOCKNUMBER = _descriptor.Descriptor(
+ name='BlockNumber',
+ full_name='protos.BlockNumber',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='number', full_name='protos.BlockNumber.number', index=0,
+ number=1, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=64,
+ serialized_end=93,
+)
+
+
+_BLOCKCOUNT = _descriptor.Descriptor(
+ name='BlockCount',
+ full_name='protos.BlockCount',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='count', full_name='protos.BlockCount.count', index=0,
+ number=1, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=95,
+ serialized_end=122,
+)
+
+DESCRIPTOR.message_types_by_name['BlockNumber'] = _BLOCKNUMBER
+DESCRIPTOR.message_types_by_name['BlockCount'] = _BLOCKCOUNT
+
+BlockNumber = _reflection.GeneratedProtocolMessageType('BlockNumber', (_message.Message,), dict(
+ DESCRIPTOR = _BLOCKNUMBER,
+ __module__ = 'api_pb2'
+ # @@protoc_insertion_point(class_scope:protos.BlockNumber)
+ ))
+_sym_db.RegisterMessage(BlockNumber)
+
+BlockCount = _reflection.GeneratedProtocolMessageType('BlockCount', (_message.Message,), dict(
+ DESCRIPTOR = _BLOCKCOUNT,
+ __module__ = 'api_pb2'
+ # @@protoc_insertion_point(class_scope:protos.BlockCount)
+ ))
+_sym_db.RegisterMessage(BlockCount)
+
+
+import abc
+import six
+from grpc.beta import implementations as beta_implementations
+from grpc.beta import interfaces as beta_interfaces
+from grpc.framework.common import cardinality
+from grpc.framework.interfaces.face import utilities as face_utilities
+
+class BetaOpenchainServicer(object):
+ """Interface exported by the server.
+ """
+ def GetBlockchainInfo(self, request, context):
+ """GetBlockchainInfo returns information about the blockchain ledger such as
+ height, current block hash, and previous block hash.
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def GetBlockByNumber(self, request, context):
+ """GetBlockByNumber returns the data contained within a specific block in the
+ blockchain. The genesis block is block zero.
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def GetBlockCount(self, request, context):
+ """GetBlockCount returns the current number of blocks in the blockchain data
+ structure.
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def GetPeers(self, request, context):
+ """GetPeers returns a list of all peer nodes currently connected to the target
+ peer.
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+
+class BetaOpenchainStub(object):
+ """Interface exported by the server.
+ """
+ def GetBlockchainInfo(self, request, timeout):
+ """GetBlockchainInfo returns information about the blockchain ledger such as
+ height, current block hash, and previous block hash.
+ """
+ raise NotImplementedError()
+ GetBlockchainInfo.future = None
+ def GetBlockByNumber(self, request, timeout):
+ """GetBlockByNumber returns the data contained within a specific block in the
+ blockchain. The genesis block is block zero.
+ """
+ raise NotImplementedError()
+ GetBlockByNumber.future = None
+ def GetBlockCount(self, request, timeout):
+ """GetBlockCount returns the current number of blocks in the blockchain data
+ structure.
+ """
+ raise NotImplementedError()
+ GetBlockCount.future = None
+ def GetPeers(self, request, timeout):
+ """GetPeers returns a list of all peer nodes currently connected to the target
+ peer.
+ """
+ raise NotImplementedError()
+ GetPeers.future = None
+
+def beta_create_Openchain_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+ import google.protobuf.empty_pb2
+ import fabric_pb2
+ import api_pb2
+ import fabric_pb2
+ import google.protobuf.empty_pb2
+ import api_pb2
+ import google.protobuf.empty_pb2
+ import fabric_pb2
+ request_deserializers = {
+ ('protos.Openchain', 'GetBlockByNumber'): api_pb2.BlockNumber.FromString,
+ ('protos.Openchain', 'GetBlockCount'): google.protobuf.empty_pb2.Empty.FromString,
+ ('protos.Openchain', 'GetBlockchainInfo'): google.protobuf.empty_pb2.Empty.FromString,
+ ('protos.Openchain', 'GetPeers'): google.protobuf.empty_pb2.Empty.FromString,
+ }
+ response_serializers = {
+ ('protos.Openchain', 'GetBlockByNumber'): fabric_pb2.Block.SerializeToString,
+ ('protos.Openchain', 'GetBlockCount'): api_pb2.BlockCount.SerializeToString,
+ ('protos.Openchain', 'GetBlockchainInfo'): fabric_pb2.BlockchainInfo.SerializeToString,
+ ('protos.Openchain', 'GetPeers'): fabric_pb2.PeersMessage.SerializeToString,
+ }
+ method_implementations = {
+ ('protos.Openchain', 'GetBlockByNumber'): face_utilities.unary_unary_inline(servicer.GetBlockByNumber),
+ ('protos.Openchain', 'GetBlockCount'): face_utilities.unary_unary_inline(servicer.GetBlockCount),
+ ('protos.Openchain', 'GetBlockchainInfo'): face_utilities.unary_unary_inline(servicer.GetBlockchainInfo),
+ ('protos.Openchain', 'GetPeers'): face_utilities.unary_unary_inline(servicer.GetPeers),
+ }
+ server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+ return beta_implementations.server(method_implementations, options=server_options)
+
+def beta_create_Openchain_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+ import google.protobuf.empty_pb2
+ import fabric_pb2
+ import api_pb2
+ import fabric_pb2
+ import google.protobuf.empty_pb2
+ import api_pb2
+ import google.protobuf.empty_pb2
+ import fabric_pb2
+ request_serializers = {
+ ('protos.Openchain', 'GetBlockByNumber'): api_pb2.BlockNumber.SerializeToString,
+ ('protos.Openchain', 'GetBlockCount'): google.protobuf.empty_pb2.Empty.SerializeToString,
+ ('protos.Openchain', 'GetBlockchainInfo'): google.protobuf.empty_pb2.Empty.SerializeToString,
+ ('protos.Openchain', 'GetPeers'): google.protobuf.empty_pb2.Empty.SerializeToString,
+ }
+ response_deserializers = {
+ ('protos.Openchain', 'GetBlockByNumber'): fabric_pb2.Block.FromString,
+ ('protos.Openchain', 'GetBlockCount'): api_pb2.BlockCount.FromString,
+ ('protos.Openchain', 'GetBlockchainInfo'): fabric_pb2.BlockchainInfo.FromString,
+ ('protos.Openchain', 'GetPeers'): fabric_pb2.PeersMessage.FromString,
+ }
+ cardinalities = {
+ 'GetBlockByNumber': cardinality.Cardinality.UNARY_UNARY,
+ 'GetBlockCount': cardinality.Cardinality.UNARY_UNARY,
+ 'GetBlockchainInfo': cardinality.Cardinality.UNARY_UNARY,
+ 'GetPeers': cardinality.Cardinality.UNARY_UNARY,
+ }
+ stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+ return beta_implementations.dynamic_stub(channel, 'protos.Openchain', cardinalities, options=stub_options)
+# @@protoc_insertion_point(module_scope)
diff --git a/bddtests/ca_pb2.py b/bddtests/ca_pb2.py
new file mode 100644
index 00000000000..907a2618201
--- /dev/null
+++ b/bddtests/ca_pb2.py
@@ -0,0 +1,2677 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: ca.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='ca.proto',
+ package='protos',
+ syntax='proto3',
+ serialized_pb=_b('\n\x08\x63\x61.proto\x12\x06protos\x1a\x1fgoogle/protobuf/timestamp.proto\"`\n\x08\x43\x41Status\x12+\n\x06status\x18\x01 \x01(\x0e\x32\x1b.protos.CAStatus.StatusCode\"\'\n\nStatusCode\x12\x06\n\x02OK\x10\x00\x12\x11\n\rUNKNOWN_ERROR\x10\x01\"\x07\n\x05\x45mpty\"\x16\n\x08Identity\x12\n\n\x02id\x18\x01 \x01(\t\"\x14\n\x05Token\x12\x0b\n\x03tok\x18\x01 \x01(\x0c\"\x14\n\x04Hash\x12\x0c\n\x04hash\x18\x01 \x01(\x0c\":\n\tPublicKey\x12 \n\x04type\x18\x01 \x01(\x0e\x32\x12.protos.CryptoType\x12\x0b\n\x03key\x18\x02 \x01(\x0c\";\n\nPrivateKey\x12 \n\x04type\x18\x01 \x01(\x0e\x32\x12.protos.CryptoType\x12\x0b\n\x03key\x18\x02 \x01(\x0c\"C\n\tSignature\x12 \n\x04type\x18\x01 \x01(\x0e\x32\x12.protos.CryptoType\x12\t\n\x01r\x18\x02 \x01(\x0c\x12\t\n\x01s\x18\x03 \x01(\x0c\"q\n\x0fRegisterUserReq\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.protos.Identity\x12\x1a\n\x04role\x18\x02 \x01(\x0e\x32\x0c.protos.Role\x12\x0f\n\x07\x61\x63\x63ount\x18\x03 \x01(\t\x12\x13\n\x0b\x61\x66\x66iliation\x18\x04 \x01(\t\"k\n\x0eReadUserSetReq\x12\x1d\n\x03req\x18\x01 \x01(\x0b\x32\x10.protos.Identity\x12\x1a\n\x04role\x18\x02 \x01(\x0e\x32\x0c.protos.Role\x12\x1e\n\x03sig\x18\x03 \x01(\x0b\x32\x11.protos.Signature\"@\n\x04User\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.protos.Identity\x12\x1a\n\x04role\x18\x02 \x01(\x0e\x32\x0c.protos.Role\"&\n\x07UserSet\x12\x1b\n\x05users\x18\x01 \x03(\x0b\x32\x0c.protos.User\"\xd3\x01\n\x0e\x45\x43\x65rtCreateReq\x12&\n\x02ts\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1c\n\x02id\x18\x02 \x01(\x0b\x32\x10.protos.Identity\x12\x1a\n\x03tok\x18\x03 \x01(\x0b\x32\r.protos.Token\x12\x1f\n\x04sign\x18\x04 \x01(\x0b\x32\x11.protos.PublicKey\x12\x1e\n\x03\x65nc\x18\x05 \x01(\x0b\x32\x11.protos.PublicKey\x12\x1e\n\x03sig\x18\x06 \x01(\x0b\x32\x11.protos.Signature\"}\n\x0f\x45\x43\x65rtCreateResp\x12\x1f\n\x05\x63\x65rts\x18\x01 \x01(\x0b\x32\x10.protos.CertPair\x12\x1c\n\x05\x63hain\x18\x02 
\x01(\x0b\x32\r.protos.Token\x12\x0f\n\x07pkchain\x18\x04 \x01(\x0c\x12\x1a\n\x03tok\x18\x03 \x01(\x0b\x32\r.protos.Token\",\n\x0c\x45\x43\x65rtReadReq\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.protos.Identity\"j\n\x0e\x45\x43\x65rtRevokeReq\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.protos.Identity\x12\x1a\n\x04\x63\x65rt\x18\x02 \x01(\x0b\x32\x0c.protos.Cert\x12\x1e\n\x03sig\x18\x03 \x01(\x0b\x32\x11.protos.Signature\"K\n\x0b\x45\x43\x65rtCRLReq\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.protos.Identity\x12\x1e\n\x03sig\x18\x02 \x01(\x0b\x32\x11.protos.Signature\"\x96\x01\n\x0eTCertCreateReq\x12&\n\x02ts\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1c\n\x02id\x18\x02 \x01(\x0b\x32\x10.protos.Identity\x12\x1e\n\x03pub\x18\x03 \x01(\x0b\x32\x11.protos.PublicKey\x12\x1e\n\x03sig\x18\x04 \x01(\x0b\x32\x11.protos.Signature\"-\n\x0fTCertCreateResp\x12\x1a\n\x04\x63\x65rt\x18\x01 \x01(\x0b\x32\x0c.protos.Cert\"\xb2\x01\n\x11TCertCreateSetReq\x12&\n\x02ts\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1c\n\x02id\x18\x02 \x01(\x0b\x32\x10.protos.Identity\x12\x0b\n\x03num\x18\x03 \x01(\r\x12*\n\nattributes\x18\x04 \x03(\x0b\x32\x16.protos.TCertAttribute\x12\x1e\n\x03sig\x18\x05 \x01(\x0b\x32\x11.protos.Signature\"?\n\x0eTCertAttribute\x12\x15\n\rattributeName\x18\x01 \x01(\t\x12\x16\n\x0e\x61ttributeValue\x18\x02 \x01(\t\"4\n\x12TCertCreateSetResp\x12\x1e\n\x05\x63\x65rts\x18\x01 \x01(\x0b\x32\x0f.protos.CertSet\"\xaf\x01\n\x0cTCertReadReq\x12&\n\x02ts\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1a\n\x04hash\x18\x02 \x01(\x0b\x32\x0c.protos.Hash\x12\x1d\n\x03req\x18\x03 \x01(\x0b\x32\x10.protos.Identity\x12\x1c\n\x02id\x18\x04 \x01(\x0b\x32\x10.protos.Identity\x12\x1e\n\x03sig\x18\x05 \x01(\x0b\x32\x11.protos.Signature\"\xa3\x01\n\x0fTCertReadSetReq\x12&\n\x02ts\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1d\n\x03req\x18\x02 \x01(\x0b\x32\x10.protos.Identity\x12\x1c\n\x02id\x18\x03 
\x01(\x0b\x32\x10.protos.Identity\x12\x0b\n\x03num\x18\x04 \x01(\r\x12\x1e\n\x03sig\x18\x05 \x01(\x0b\x32\x11.protos.Signature\"\xc1\x01\n\x10TCertReadSetsReq\x12)\n\x05\x62\x65gin\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\'\n\x03\x65nd\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1d\n\x03req\x18\x03 \x01(\x0b\x32\x10.protos.Identity\x12\x1a\n\x04role\x18\x04 \x01(\x0e\x32\x0c.protos.Role\x12\x1e\n\x03sig\x18\x05 \x01(\x0b\x32\x11.protos.Signature\"j\n\x0eTCertRevokeReq\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.protos.Identity\x12\x1a\n\x04\x63\x65rt\x18\x02 \x01(\x0b\x32\x0c.protos.Cert\x12\x1e\n\x03sig\x18\x03 \x01(\x0b\x32\x11.protos.Signature\"y\n\x11TCertRevokeSetReq\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.protos.Identity\x12&\n\x02ts\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1e\n\x03sig\x18\x03 \x01(\x0b\x32\x11.protos.Signature\"K\n\x0bTCertCRLReq\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.protos.Identity\x12\x1e\n\x03sig\x18\x02 \x01(\x0b\x32\x11.protos.Signature\"\x98\x01\n\x10TLSCertCreateReq\x12&\n\x02ts\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1c\n\x02id\x18\x02 \x01(\x0b\x32\x10.protos.Identity\x12\x1e\n\x03pub\x18\x03 \x01(\x0b\x32\x11.protos.PublicKey\x12\x1e\n\x03sig\x18\x04 \x01(\x0b\x32\x11.protos.Signature\"O\n\x11TLSCertCreateResp\x12\x1a\n\x04\x63\x65rt\x18\x01 \x01(\x0b\x32\x0c.protos.Cert\x12\x1e\n\x08rootCert\x18\x02 \x01(\x0b\x32\x0c.protos.Cert\".\n\x0eTLSCertReadReq\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.protos.Identity\"l\n\x10TLSCertRevokeReq\x12\x1c\n\x02id\x18\x01 \x01(\x0b\x32\x10.protos.Identity\x12\x1a\n\x04\x63\x65rt\x18\x02 \x01(\x0b\x32\x0c.protos.Cert\x12\x1e\n\x03sig\x18\x03 \x01(\x0b\x32\x11.protos.Signature\"\x14\n\x04\x43\x65rt\x12\x0c\n\x04\x63\x65rt\x18\x01 \x01(\x0c\"i\n\x05TCert\x12\x0c\n\x04\x63\x65rt\x18\x01 \x01(\x0c\x12%\n\x04keys\x18\x02 \x03(\x0b\x32\x17.protos.TCert.KeysEntry\x1a+\n\tKeysEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\x0c:\x02\x38\x01\"z\n\x07\x43\x65rtSet\x12&\n\x02ts\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1c\n\x02id\x18\x02 \x01(\x0b\x32\x10.protos.Identity\x12\x0b\n\x03key\x18\x03 \x01(\x0c\x12\x1c\n\x05\x63\x65rts\x18\x04 \x03(\x0b\x32\r.protos.TCert\")\n\x08\x43\x65rtSets\x12\x1d\n\x04sets\x18\x01 \x03(\x0b\x32\x0f.protos.CertSet\"%\n\x08\x43\x65rtPair\x12\x0c\n\x04sign\x18\x01 \x01(\x0c\x12\x0b\n\x03\x65nc\x18\x02 \x01(\x0c*)\n\nCryptoType\x12\t\n\x05\x45\x43\x44SA\x10\x00\x12\x07\n\x03RSA\x10\x01\x12\x07\n\x03\x44SA\x10\x02*M\n\x04Role\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x43LIENT\x10\x01\x12\x08\n\x04PEER\x10\x02\x12\r\n\tVALIDATOR\x10\x04\x12\x0b\n\x07\x41UDITOR\x10\x08\x12\t\n\x03\x41LL\x10\xff\xff\x03\x32\xb9\x02\n\x04\x45\x43\x41P\x12\x30\n\x11ReadCACertificate\x12\r.protos.Empty\x1a\x0c.protos.Cert\x12H\n\x15\x43reateCertificatePair\x12\x16.protos.ECertCreateReq\x1a\x17.protos.ECertCreateResp\x12=\n\x13ReadCertificatePair\x12\x14.protos.ECertReadReq\x1a\x10.protos.CertPair\x12\x33\n\x15ReadCertificateByHash\x12\x0c.protos.Hash\x1a\x0c.protos.Cert\x12\x41\n\x15RevokeCertificatePair\x12\x16.protos.ECertRevokeReq\x1a\x10.protos.CAStatus2\xea\x01\n\x04\x45\x43\x41\x41\x12\x36\n\x0cRegisterUser\x12\x17.protos.RegisterUserReq\x1a\r.protos.Token\x12\x36\n\x0bReadUserSet\x12\x16.protos.ReadUserSetReq\x1a\x0f.protos.UserSet\x12=\n\x11RevokeCertificate\x12\x16.protos.ECertRevokeReq\x1a\x10.protos.CAStatus\x12\x33\n\nPublishCRL\x12\x13.protos.ECertCRLReq\x1a\x10.protos.CAStatus2\x82\x03\n\x04TCAP\x12\x30\n\x11ReadCACertificate\x12\r.protos.Empty\x1a\x0c.protos.Cert\x12M\n\x14\x43reateCertificateSet\x12\x19.protos.TCertCreateSetReq\x1a\x1a.protos.TCertCreateSetResp\x12\x35\n\x0fReadCertificate\x12\x14.protos.TCertReadReq\x1a\x0c.protos.Cert\x12>\n\x12ReadCertificateSet\x12\x17.protos.TCertReadSetReq\x1a\x0f.protos.CertSet\x12=\n\x11RevokeCertificate\x12\x16.protos.TCertRevokeReq\x1a\x10.protos.CAStatus\x12\x43\n\x14RevokeCertificateSet\x12\x19.protos.TCe
rtRevokeSetReq\x1a\x10.protos.CAStatus2\x82\x02\n\x04TCAA\x12\x41\n\x13ReadCertificateSets\x12\x18.protos.TCertReadSetsReq\x1a\x10.protos.CertSets\x12=\n\x11RevokeCertificate\x12\x16.protos.TCertRevokeReq\x1a\x10.protos.CAStatus\x12\x43\n\x14RevokeCertificateSet\x12\x19.protos.TCertRevokeSetReq\x1a\x10.protos.CAStatus\x12\x33\n\nPublishCRL\x12\x13.protos.TCertCRLReq\x1a\x10.protos.CAStatus2\xfe\x01\n\x06TLSCAP\x12\x30\n\x11ReadCACertificate\x12\r.protos.Empty\x1a\x0c.protos.Cert\x12H\n\x11\x43reateCertificate\x12\x18.protos.TLSCertCreateReq\x1a\x19.protos.TLSCertCreateResp\x12\x37\n\x0fReadCertificate\x12\x16.protos.TLSCertReadReq\x1a\x0c.protos.Cert\x12?\n\x11RevokeCertificate\x12\x18.protos.TLSCertRevokeReq\x1a\x10.protos.CAStatus2I\n\x06TLSCAA\x12?\n\x11RevokeCertificate\x12\x18.protos.TLSCertRevokeReq\x1a\x10.protos.CAStatusb\x06proto3')
+ ,
+ dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+_CRYPTOTYPE = _descriptor.EnumDescriptor(
+ name='CryptoType',
+ full_name='protos.CryptoType',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='ECDSA', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='RSA', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='DSA', index=2, number=2,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=3397,
+ serialized_end=3438,
+)
+_sym_db.RegisterEnumDescriptor(_CRYPTOTYPE)
+
+CryptoType = enum_type_wrapper.EnumTypeWrapper(_CRYPTOTYPE)
+_ROLE = _descriptor.EnumDescriptor(
+ name='Role',
+ full_name='protos.Role',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='NONE', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='CLIENT', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='PEER', index=2, number=2,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='VALIDATOR', index=3, number=4,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='AUDITOR', index=4, number=8,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='ALL', index=5, number=65535,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=3440,
+ serialized_end=3517,
+)
+_sym_db.RegisterEnumDescriptor(_ROLE)
+
+Role = enum_type_wrapper.EnumTypeWrapper(_ROLE)
+ECDSA = 0
+RSA = 1
+DSA = 2
+NONE = 0
+CLIENT = 1
+PEER = 2
+VALIDATOR = 4
+AUDITOR = 8
+ALL = 65535
+
+
+_CASTATUS_STATUSCODE = _descriptor.EnumDescriptor(
+ name='StatusCode',
+ full_name='protos.CAStatus.StatusCode',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='OK', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='UNKNOWN_ERROR', index=1, number=1,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=110,
+ serialized_end=149,
+)
+_sym_db.RegisterEnumDescriptor(_CASTATUS_STATUSCODE)
+
+
+_CASTATUS = _descriptor.Descriptor(
+ name='CAStatus',
+ full_name='protos.CAStatus',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='status', full_name='protos.CAStatus.status', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ _CASTATUS_STATUSCODE,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=53,
+ serialized_end=149,
+)
+
+
+_EMPTY = _descriptor.Descriptor(
+ name='Empty',
+ full_name='protos.Empty',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=151,
+ serialized_end=158,
+)
+
+
+_IDENTITY = _descriptor.Descriptor(
+ name='Identity',
+ full_name='protos.Identity',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.Identity.id', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=160,
+ serialized_end=182,
+)
+
+
+_TOKEN = _descriptor.Descriptor(
+ name='Token',
+ full_name='protos.Token',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='tok', full_name='protos.Token.tok', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=184,
+ serialized_end=204,
+)
+
+
+_HASH = _descriptor.Descriptor(
+ name='Hash',
+ full_name='protos.Hash',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='hash', full_name='protos.Hash.hash', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=206,
+ serialized_end=226,
+)
+
+
+_PUBLICKEY = _descriptor.Descriptor(
+ name='PublicKey',
+ full_name='protos.PublicKey',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='type', full_name='protos.PublicKey.type', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='key', full_name='protos.PublicKey.key', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=228,
+ serialized_end=286,
+)
+
+
+_PRIVATEKEY = _descriptor.Descriptor(
+ name='PrivateKey',
+ full_name='protos.PrivateKey',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='type', full_name='protos.PrivateKey.type', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='key', full_name='protos.PrivateKey.key', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=288,
+ serialized_end=347,
+)
+
+
+_SIGNATURE = _descriptor.Descriptor(
+ name='Signature',
+ full_name='protos.Signature',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='type', full_name='protos.Signature.type', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='r', full_name='protos.Signature.r', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='s', full_name='protos.Signature.s', index=2,
+ number=3, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=349,
+ serialized_end=416,
+)
+
+
+_REGISTERUSERREQ = _descriptor.Descriptor(
+ name='RegisterUserReq',
+ full_name='protos.RegisterUserReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.RegisterUserReq.id', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='role', full_name='protos.RegisterUserReq.role', index=1,
+ number=2, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='account', full_name='protos.RegisterUserReq.account', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='affiliation', full_name='protos.RegisterUserReq.affiliation', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=418,
+ serialized_end=531,
+)
+
+
+_READUSERSETREQ = _descriptor.Descriptor(
+ name='ReadUserSetReq',
+ full_name='protos.ReadUserSetReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='req', full_name='protos.ReadUserSetReq.req', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='role', full_name='protos.ReadUserSetReq.role', index=1,
+ number=2, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sig', full_name='protos.ReadUserSetReq.sig', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=533,
+ serialized_end=640,
+)
+
+
+_USER = _descriptor.Descriptor(
+ name='User',
+ full_name='protos.User',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.User.id', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='role', full_name='protos.User.role', index=1,
+ number=2, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=642,
+ serialized_end=706,
+)
+
+
+_USERSET = _descriptor.Descriptor(
+ name='UserSet',
+ full_name='protos.UserSet',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='users', full_name='protos.UserSet.users', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=708,
+ serialized_end=746,
+)
+
+
+_ECERTCREATEREQ = _descriptor.Descriptor(
+ name='ECertCreateReq',
+ full_name='protos.ECertCreateReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='ts', full_name='protos.ECertCreateReq.ts', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.ECertCreateReq.id', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='tok', full_name='protos.ECertCreateReq.tok', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sign', full_name='protos.ECertCreateReq.sign', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='enc', full_name='protos.ECertCreateReq.enc', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sig', full_name='protos.ECertCreateReq.sig', index=5,
+ number=6, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=749,
+ serialized_end=960,
+)
+
+
+_ECERTCREATERESP = _descriptor.Descriptor(
+ name='ECertCreateResp',
+ full_name='protos.ECertCreateResp',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='certs', full_name='protos.ECertCreateResp.certs', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='chain', full_name='protos.ECertCreateResp.chain', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='pkchain', full_name='protos.ECertCreateResp.pkchain', index=2,
+ number=4, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='tok', full_name='protos.ECertCreateResp.tok', index=3,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=962,
+ serialized_end=1087,
+)
+
+
+_ECERTREADREQ = _descriptor.Descriptor(
+ name='ECertReadReq',
+ full_name='protos.ECertReadReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.ECertReadReq.id', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1089,
+ serialized_end=1133,
+)
+
+
+_ECERTREVOKEREQ = _descriptor.Descriptor(
+ name='ECertRevokeReq',
+ full_name='protos.ECertRevokeReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.ECertRevokeReq.id', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='cert', full_name='protos.ECertRevokeReq.cert', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sig', full_name='protos.ECertRevokeReq.sig', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1135,
+ serialized_end=1241,
+)
+
+
+_ECERTCRLREQ = _descriptor.Descriptor(
+ name='ECertCRLReq',
+ full_name='protos.ECertCRLReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.ECertCRLReq.id', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sig', full_name='protos.ECertCRLReq.sig', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1243,
+ serialized_end=1318,
+)
+
+
+_TCERTCREATEREQ = _descriptor.Descriptor(
+ name='TCertCreateReq',
+ full_name='protos.TCertCreateReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='ts', full_name='protos.TCertCreateReq.ts', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.TCertCreateReq.id', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='pub', full_name='protos.TCertCreateReq.pub', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sig', full_name='protos.TCertCreateReq.sig', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1321,
+ serialized_end=1471,
+)
+
+
+_TCERTCREATERESP = _descriptor.Descriptor(
+ name='TCertCreateResp',
+ full_name='protos.TCertCreateResp',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='cert', full_name='protos.TCertCreateResp.cert', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1473,
+ serialized_end=1518,
+)
+
+
+_TCERTCREATESETREQ = _descriptor.Descriptor(
+ name='TCertCreateSetReq',
+ full_name='protos.TCertCreateSetReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='ts', full_name='protos.TCertCreateSetReq.ts', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.TCertCreateSetReq.id', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='num', full_name='protos.TCertCreateSetReq.num', index=2,
+ number=3, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='attributes', full_name='protos.TCertCreateSetReq.attributes', index=3,
+ number=4, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sig', full_name='protos.TCertCreateSetReq.sig', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1521,
+ serialized_end=1699,
+)
+
+
+_TCERTATTRIBUTE = _descriptor.Descriptor(
+ name='TCertAttribute',
+ full_name='protos.TCertAttribute',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='attributeName', full_name='protos.TCertAttribute.attributeName', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='attributeValue', full_name='protos.TCertAttribute.attributeValue', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1701,
+ serialized_end=1764,
+)
+
+
+_TCERTCREATESETRESP = _descriptor.Descriptor(
+ name='TCertCreateSetResp',
+ full_name='protos.TCertCreateSetResp',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='certs', full_name='protos.TCertCreateSetResp.certs', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1766,
+ serialized_end=1818,
+)
+
+
+_TCERTREADREQ = _descriptor.Descriptor(
+ name='TCertReadReq',
+ full_name='protos.TCertReadReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='ts', full_name='protos.TCertReadReq.ts', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='hash', full_name='protos.TCertReadReq.hash', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='req', full_name='protos.TCertReadReq.req', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.TCertReadReq.id', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sig', full_name='protos.TCertReadReq.sig', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1821,
+ serialized_end=1996,
+)
+
+
+_TCERTREADSETREQ = _descriptor.Descriptor(
+ name='TCertReadSetReq',
+ full_name='protos.TCertReadSetReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='ts', full_name='protos.TCertReadSetReq.ts', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='req', full_name='protos.TCertReadSetReq.req', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.TCertReadSetReq.id', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='num', full_name='protos.TCertReadSetReq.num', index=3,
+ number=4, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sig', full_name='protos.TCertReadSetReq.sig', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1999,
+ serialized_end=2162,
+)
+
+
+_TCERTREADSETSREQ = _descriptor.Descriptor(
+ name='TCertReadSetsReq',
+ full_name='protos.TCertReadSetsReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='begin', full_name='protos.TCertReadSetsReq.begin', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='end', full_name='protos.TCertReadSetsReq.end', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='req', full_name='protos.TCertReadSetsReq.req', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='role', full_name='protos.TCertReadSetsReq.role', index=3,
+ number=4, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sig', full_name='protos.TCertReadSetsReq.sig', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2165,
+ serialized_end=2358,
+)
+
+
+_TCERTREVOKEREQ = _descriptor.Descriptor(
+ name='TCertRevokeReq',
+ full_name='protos.TCertRevokeReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.TCertRevokeReq.id', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='cert', full_name='protos.TCertRevokeReq.cert', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sig', full_name='protos.TCertRevokeReq.sig', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2360,
+ serialized_end=2466,
+)
+
+
+_TCERTREVOKESETREQ = _descriptor.Descriptor(
+ name='TCertRevokeSetReq',
+ full_name='protos.TCertRevokeSetReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.TCertRevokeSetReq.id', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='ts', full_name='protos.TCertRevokeSetReq.ts', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sig', full_name='protos.TCertRevokeSetReq.sig', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2468,
+ serialized_end=2589,
+)
+
+
+_TCERTCRLREQ = _descriptor.Descriptor(
+ name='TCertCRLReq',
+ full_name='protos.TCertCRLReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.TCertCRLReq.id', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sig', full_name='protos.TCertCRLReq.sig', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2591,
+ serialized_end=2666,
+)
+
+
+_TLSCERTCREATEREQ = _descriptor.Descriptor(
+ name='TLSCertCreateReq',
+ full_name='protos.TLSCertCreateReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='ts', full_name='protos.TLSCertCreateReq.ts', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.TLSCertCreateReq.id', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='pub', full_name='protos.TLSCertCreateReq.pub', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sig', full_name='protos.TLSCertCreateReq.sig', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2669,
+ serialized_end=2821,
+)
+
+
+_TLSCERTCREATERESP = _descriptor.Descriptor(
+ name='TLSCertCreateResp',
+ full_name='protos.TLSCertCreateResp',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='cert', full_name='protos.TLSCertCreateResp.cert', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='rootCert', full_name='protos.TLSCertCreateResp.rootCert', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2823,
+ serialized_end=2902,
+)
+
+
+_TLSCERTREADREQ = _descriptor.Descriptor(
+ name='TLSCertReadReq',
+ full_name='protos.TLSCertReadReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.TLSCertReadReq.id', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2904,
+ serialized_end=2950,
+)
+
+
+_TLSCERTREVOKEREQ = _descriptor.Descriptor(
+ name='TLSCertRevokeReq',
+ full_name='protos.TLSCertRevokeReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.TLSCertRevokeReq.id', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='cert', full_name='protos.TLSCertRevokeReq.cert', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sig', full_name='protos.TLSCertRevokeReq.sig', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2952,
+ serialized_end=3060,
+)
+
+
+_CERT = _descriptor.Descriptor(
+ name='Cert',
+ full_name='protos.Cert',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='cert', full_name='protos.Cert.cert', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3062,
+ serialized_end=3082,
+)
+
+
+_TCERT_KEYSENTRY = _descriptor.Descriptor(
+ name='KeysEntry',
+ full_name='protos.TCert.KeysEntry',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='key', full_name='protos.TCert.KeysEntry.key', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='protos.TCert.KeysEntry.value', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3146,
+ serialized_end=3189,
+)
+
+_TCERT = _descriptor.Descriptor(
+ name='TCert',
+ full_name='protos.TCert',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='cert', full_name='protos.TCert.cert', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='keys', full_name='protos.TCert.keys', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[_TCERT_KEYSENTRY, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3084,
+ serialized_end=3189,
+)
+
+
+_CERTSET = _descriptor.Descriptor(
+ name='CertSet',
+ full_name='protos.CertSet',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='ts', full_name='protos.CertSet.ts', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='id', full_name='protos.CertSet.id', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='key', full_name='protos.CertSet.key', index=2,
+ number=3, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='certs', full_name='protos.CertSet.certs', index=3,
+ number=4, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3191,
+ serialized_end=3313,
+)
+
+
+_CERTSETS = _descriptor.Descriptor(
+ name='CertSets',
+ full_name='protos.CertSets',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='sets', full_name='protos.CertSets.sets', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3315,
+ serialized_end=3356,
+)
+
+
+_CERTPAIR = _descriptor.Descriptor(
+ name='CertPair',
+ full_name='protos.CertPair',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='sign', full_name='protos.CertPair.sign', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='enc', full_name='protos.CertPair.enc', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3358,
+ serialized_end=3395,
+)
+
+_CASTATUS.fields_by_name['status'].enum_type = _CASTATUS_STATUSCODE
+_CASTATUS_STATUSCODE.containing_type = _CASTATUS
+_PUBLICKEY.fields_by_name['type'].enum_type = _CRYPTOTYPE
+_PRIVATEKEY.fields_by_name['type'].enum_type = _CRYPTOTYPE
+_SIGNATURE.fields_by_name['type'].enum_type = _CRYPTOTYPE
+_REGISTERUSERREQ.fields_by_name['id'].message_type = _IDENTITY
+_REGISTERUSERREQ.fields_by_name['role'].enum_type = _ROLE
+_READUSERSETREQ.fields_by_name['req'].message_type = _IDENTITY
+_READUSERSETREQ.fields_by_name['role'].enum_type = _ROLE
+_READUSERSETREQ.fields_by_name['sig'].message_type = _SIGNATURE
+_USER.fields_by_name['id'].message_type = _IDENTITY
+_USER.fields_by_name['role'].enum_type = _ROLE
+_USERSET.fields_by_name['users'].message_type = _USER
+_ECERTCREATEREQ.fields_by_name['ts'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_ECERTCREATEREQ.fields_by_name['id'].message_type = _IDENTITY
+_ECERTCREATEREQ.fields_by_name['tok'].message_type = _TOKEN
+_ECERTCREATEREQ.fields_by_name['sign'].message_type = _PUBLICKEY
+_ECERTCREATEREQ.fields_by_name['enc'].message_type = _PUBLICKEY
+_ECERTCREATEREQ.fields_by_name['sig'].message_type = _SIGNATURE
+_ECERTCREATERESP.fields_by_name['certs'].message_type = _CERTPAIR
+_ECERTCREATERESP.fields_by_name['chain'].message_type = _TOKEN
+_ECERTCREATERESP.fields_by_name['tok'].message_type = _TOKEN
+_ECERTREADREQ.fields_by_name['id'].message_type = _IDENTITY
+_ECERTREVOKEREQ.fields_by_name['id'].message_type = _IDENTITY
+_ECERTREVOKEREQ.fields_by_name['cert'].message_type = _CERT
+_ECERTREVOKEREQ.fields_by_name['sig'].message_type = _SIGNATURE
+_ECERTCRLREQ.fields_by_name['id'].message_type = _IDENTITY
+_ECERTCRLREQ.fields_by_name['sig'].message_type = _SIGNATURE
+_TCERTCREATEREQ.fields_by_name['ts'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_TCERTCREATEREQ.fields_by_name['id'].message_type = _IDENTITY
+_TCERTCREATEREQ.fields_by_name['pub'].message_type = _PUBLICKEY
+_TCERTCREATEREQ.fields_by_name['sig'].message_type = _SIGNATURE
+_TCERTCREATERESP.fields_by_name['cert'].message_type = _CERT
+_TCERTCREATESETREQ.fields_by_name['ts'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_TCERTCREATESETREQ.fields_by_name['id'].message_type = _IDENTITY
+_TCERTCREATESETREQ.fields_by_name['attributes'].message_type = _TCERTATTRIBUTE
+_TCERTCREATESETREQ.fields_by_name['sig'].message_type = _SIGNATURE
+_TCERTCREATESETRESP.fields_by_name['certs'].message_type = _CERTSET
+_TCERTREADREQ.fields_by_name['ts'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_TCERTREADREQ.fields_by_name['hash'].message_type = _HASH
+_TCERTREADREQ.fields_by_name['req'].message_type = _IDENTITY
+_TCERTREADREQ.fields_by_name['id'].message_type = _IDENTITY
+_TCERTREADREQ.fields_by_name['sig'].message_type = _SIGNATURE
+_TCERTREADSETREQ.fields_by_name['ts'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_TCERTREADSETREQ.fields_by_name['req'].message_type = _IDENTITY
+_TCERTREADSETREQ.fields_by_name['id'].message_type = _IDENTITY
+_TCERTREADSETREQ.fields_by_name['sig'].message_type = _SIGNATURE
+_TCERTREADSETSREQ.fields_by_name['begin'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_TCERTREADSETSREQ.fields_by_name['end'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_TCERTREADSETSREQ.fields_by_name['req'].message_type = _IDENTITY
+_TCERTREADSETSREQ.fields_by_name['role'].enum_type = _ROLE
+_TCERTREADSETSREQ.fields_by_name['sig'].message_type = _SIGNATURE
+_TCERTREVOKEREQ.fields_by_name['id'].message_type = _IDENTITY
+_TCERTREVOKEREQ.fields_by_name['cert'].message_type = _CERT
+_TCERTREVOKEREQ.fields_by_name['sig'].message_type = _SIGNATURE
+_TCERTREVOKESETREQ.fields_by_name['id'].message_type = _IDENTITY
+_TCERTREVOKESETREQ.fields_by_name['ts'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_TCERTREVOKESETREQ.fields_by_name['sig'].message_type = _SIGNATURE
+_TCERTCRLREQ.fields_by_name['id'].message_type = _IDENTITY
+_TCERTCRLREQ.fields_by_name['sig'].message_type = _SIGNATURE
+_TLSCERTCREATEREQ.fields_by_name['ts'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_TLSCERTCREATEREQ.fields_by_name['id'].message_type = _IDENTITY
+_TLSCERTCREATEREQ.fields_by_name['pub'].message_type = _PUBLICKEY
+_TLSCERTCREATEREQ.fields_by_name['sig'].message_type = _SIGNATURE
+_TLSCERTCREATERESP.fields_by_name['cert'].message_type = _CERT
+_TLSCERTCREATERESP.fields_by_name['rootCert'].message_type = _CERT
+_TLSCERTREADREQ.fields_by_name['id'].message_type = _IDENTITY
+_TLSCERTREVOKEREQ.fields_by_name['id'].message_type = _IDENTITY
+_TLSCERTREVOKEREQ.fields_by_name['cert'].message_type = _CERT
+_TLSCERTREVOKEREQ.fields_by_name['sig'].message_type = _SIGNATURE
+_TCERT_KEYSENTRY.containing_type = _TCERT
+_TCERT.fields_by_name['keys'].message_type = _TCERT_KEYSENTRY
+_CERTSET.fields_by_name['ts'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_CERTSET.fields_by_name['id'].message_type = _IDENTITY
+_CERTSET.fields_by_name['certs'].message_type = _TCERT
+_CERTSETS.fields_by_name['sets'].message_type = _CERTSET
+DESCRIPTOR.message_types_by_name['CAStatus'] = _CASTATUS
+DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY
+DESCRIPTOR.message_types_by_name['Identity'] = _IDENTITY
+DESCRIPTOR.message_types_by_name['Token'] = _TOKEN
+DESCRIPTOR.message_types_by_name['Hash'] = _HASH
+DESCRIPTOR.message_types_by_name['PublicKey'] = _PUBLICKEY
+DESCRIPTOR.message_types_by_name['PrivateKey'] = _PRIVATEKEY
+DESCRIPTOR.message_types_by_name['Signature'] = _SIGNATURE
+DESCRIPTOR.message_types_by_name['RegisterUserReq'] = _REGISTERUSERREQ
+DESCRIPTOR.message_types_by_name['ReadUserSetReq'] = _READUSERSETREQ
+DESCRIPTOR.message_types_by_name['User'] = _USER
+DESCRIPTOR.message_types_by_name['UserSet'] = _USERSET
+DESCRIPTOR.message_types_by_name['ECertCreateReq'] = _ECERTCREATEREQ
+DESCRIPTOR.message_types_by_name['ECertCreateResp'] = _ECERTCREATERESP
+DESCRIPTOR.message_types_by_name['ECertReadReq'] = _ECERTREADREQ
+DESCRIPTOR.message_types_by_name['ECertRevokeReq'] = _ECERTREVOKEREQ
+DESCRIPTOR.message_types_by_name['ECertCRLReq'] = _ECERTCRLREQ
+DESCRIPTOR.message_types_by_name['TCertCreateReq'] = _TCERTCREATEREQ
+DESCRIPTOR.message_types_by_name['TCertCreateResp'] = _TCERTCREATERESP
+DESCRIPTOR.message_types_by_name['TCertCreateSetReq'] = _TCERTCREATESETREQ
+DESCRIPTOR.message_types_by_name['TCertAttribute'] = _TCERTATTRIBUTE
+DESCRIPTOR.message_types_by_name['TCertCreateSetResp'] = _TCERTCREATESETRESP
+DESCRIPTOR.message_types_by_name['TCertReadReq'] = _TCERTREADREQ
+DESCRIPTOR.message_types_by_name['TCertReadSetReq'] = _TCERTREADSETREQ
+DESCRIPTOR.message_types_by_name['TCertReadSetsReq'] = _TCERTREADSETSREQ
+DESCRIPTOR.message_types_by_name['TCertRevokeReq'] = _TCERTREVOKEREQ
+DESCRIPTOR.message_types_by_name['TCertRevokeSetReq'] = _TCERTREVOKESETREQ
+DESCRIPTOR.message_types_by_name['TCertCRLReq'] = _TCERTCRLREQ
+DESCRIPTOR.message_types_by_name['TLSCertCreateReq'] = _TLSCERTCREATEREQ
+DESCRIPTOR.message_types_by_name['TLSCertCreateResp'] = _TLSCERTCREATERESP
+DESCRIPTOR.message_types_by_name['TLSCertReadReq'] = _TLSCERTREADREQ
+DESCRIPTOR.message_types_by_name['TLSCertRevokeReq'] = _TLSCERTREVOKEREQ
+DESCRIPTOR.message_types_by_name['Cert'] = _CERT
+DESCRIPTOR.message_types_by_name['TCert'] = _TCERT
+DESCRIPTOR.message_types_by_name['CertSet'] = _CERTSET
+DESCRIPTOR.message_types_by_name['CertSets'] = _CERTSETS
+DESCRIPTOR.message_types_by_name['CertPair'] = _CERTPAIR
+DESCRIPTOR.enum_types_by_name['CryptoType'] = _CRYPTOTYPE
+DESCRIPTOR.enum_types_by_name['Role'] = _ROLE
+
+CAStatus = _reflection.GeneratedProtocolMessageType('CAStatus', (_message.Message,), dict(
+ DESCRIPTOR = _CASTATUS,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.CAStatus)
+ ))
+_sym_db.RegisterMessage(CAStatus)
+
+Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), dict(
+ DESCRIPTOR = _EMPTY,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Empty)
+ ))
+_sym_db.RegisterMessage(Empty)
+
+Identity = _reflection.GeneratedProtocolMessageType('Identity', (_message.Message,), dict(
+ DESCRIPTOR = _IDENTITY,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Identity)
+ ))
+_sym_db.RegisterMessage(Identity)
+
+Token = _reflection.GeneratedProtocolMessageType('Token', (_message.Message,), dict(
+ DESCRIPTOR = _TOKEN,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Token)
+ ))
+_sym_db.RegisterMessage(Token)
+
+Hash = _reflection.GeneratedProtocolMessageType('Hash', (_message.Message,), dict(
+ DESCRIPTOR = _HASH,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Hash)
+ ))
+_sym_db.RegisterMessage(Hash)
+
+PublicKey = _reflection.GeneratedProtocolMessageType('PublicKey', (_message.Message,), dict(
+ DESCRIPTOR = _PUBLICKEY,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.PublicKey)
+ ))
+_sym_db.RegisterMessage(PublicKey)
+
+PrivateKey = _reflection.GeneratedProtocolMessageType('PrivateKey', (_message.Message,), dict(
+ DESCRIPTOR = _PRIVATEKEY,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.PrivateKey)
+ ))
+_sym_db.RegisterMessage(PrivateKey)
+
+Signature = _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), dict(
+ DESCRIPTOR = _SIGNATURE,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Signature)
+ ))
+_sym_db.RegisterMessage(Signature)
+
+RegisterUserReq = _reflection.GeneratedProtocolMessageType('RegisterUserReq', (_message.Message,), dict(
+ DESCRIPTOR = _REGISTERUSERREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.RegisterUserReq)
+ ))
+_sym_db.RegisterMessage(RegisterUserReq)
+
+ReadUserSetReq = _reflection.GeneratedProtocolMessageType('ReadUserSetReq', (_message.Message,), dict(
+ DESCRIPTOR = _READUSERSETREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ReadUserSetReq)
+ ))
+_sym_db.RegisterMessage(ReadUserSetReq)
+
+User = _reflection.GeneratedProtocolMessageType('User', (_message.Message,), dict(
+ DESCRIPTOR = _USER,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.User)
+ ))
+_sym_db.RegisterMessage(User)
+
+UserSet = _reflection.GeneratedProtocolMessageType('UserSet', (_message.Message,), dict(
+ DESCRIPTOR = _USERSET,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.UserSet)
+ ))
+_sym_db.RegisterMessage(UserSet)
+
+ECertCreateReq = _reflection.GeneratedProtocolMessageType('ECertCreateReq', (_message.Message,), dict(
+ DESCRIPTOR = _ECERTCREATEREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ECertCreateReq)
+ ))
+_sym_db.RegisterMessage(ECertCreateReq)
+
+ECertCreateResp = _reflection.GeneratedProtocolMessageType('ECertCreateResp', (_message.Message,), dict(
+ DESCRIPTOR = _ECERTCREATERESP,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ECertCreateResp)
+ ))
+_sym_db.RegisterMessage(ECertCreateResp)
+
+ECertReadReq = _reflection.GeneratedProtocolMessageType('ECertReadReq', (_message.Message,), dict(
+ DESCRIPTOR = _ECERTREADREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ECertReadReq)
+ ))
+_sym_db.RegisterMessage(ECertReadReq)
+
+ECertRevokeReq = _reflection.GeneratedProtocolMessageType('ECertRevokeReq', (_message.Message,), dict(
+ DESCRIPTOR = _ECERTREVOKEREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ECertRevokeReq)
+ ))
+_sym_db.RegisterMessage(ECertRevokeReq)
+
+ECertCRLReq = _reflection.GeneratedProtocolMessageType('ECertCRLReq', (_message.Message,), dict(
+ DESCRIPTOR = _ECERTCRLREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ECertCRLReq)
+ ))
+_sym_db.RegisterMessage(ECertCRLReq)
+
+TCertCreateReq = _reflection.GeneratedProtocolMessageType('TCertCreateReq', (_message.Message,), dict(
+ DESCRIPTOR = _TCERTCREATEREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TCertCreateReq)
+ ))
+_sym_db.RegisterMessage(TCertCreateReq)
+
+TCertCreateResp = _reflection.GeneratedProtocolMessageType('TCertCreateResp', (_message.Message,), dict(
+ DESCRIPTOR = _TCERTCREATERESP,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TCertCreateResp)
+ ))
+_sym_db.RegisterMessage(TCertCreateResp)
+
+TCertCreateSetReq = _reflection.GeneratedProtocolMessageType('TCertCreateSetReq', (_message.Message,), dict(
+ DESCRIPTOR = _TCERTCREATESETREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TCertCreateSetReq)
+ ))
+_sym_db.RegisterMessage(TCertCreateSetReq)
+
+TCertAttribute = _reflection.GeneratedProtocolMessageType('TCertAttribute', (_message.Message,), dict(
+ DESCRIPTOR = _TCERTATTRIBUTE,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TCertAttribute)
+ ))
+_sym_db.RegisterMessage(TCertAttribute)
+
+TCertCreateSetResp = _reflection.GeneratedProtocolMessageType('TCertCreateSetResp', (_message.Message,), dict(
+ DESCRIPTOR = _TCERTCREATESETRESP,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TCertCreateSetResp)
+ ))
+_sym_db.RegisterMessage(TCertCreateSetResp)
+
+TCertReadReq = _reflection.GeneratedProtocolMessageType('TCertReadReq', (_message.Message,), dict(
+ DESCRIPTOR = _TCERTREADREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TCertReadReq)
+ ))
+_sym_db.RegisterMessage(TCertReadReq)
+
+TCertReadSetReq = _reflection.GeneratedProtocolMessageType('TCertReadSetReq', (_message.Message,), dict(
+ DESCRIPTOR = _TCERTREADSETREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TCertReadSetReq)
+ ))
+_sym_db.RegisterMessage(TCertReadSetReq)
+
+TCertReadSetsReq = _reflection.GeneratedProtocolMessageType('TCertReadSetsReq', (_message.Message,), dict(
+ DESCRIPTOR = _TCERTREADSETSREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TCertReadSetsReq)
+ ))
+_sym_db.RegisterMessage(TCertReadSetsReq)
+
+TCertRevokeReq = _reflection.GeneratedProtocolMessageType('TCertRevokeReq', (_message.Message,), dict(
+ DESCRIPTOR = _TCERTREVOKEREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TCertRevokeReq)
+ ))
+_sym_db.RegisterMessage(TCertRevokeReq)
+
+TCertRevokeSetReq = _reflection.GeneratedProtocolMessageType('TCertRevokeSetReq', (_message.Message,), dict(
+ DESCRIPTOR = _TCERTREVOKESETREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TCertRevokeSetReq)
+ ))
+_sym_db.RegisterMessage(TCertRevokeSetReq)
+
+TCertCRLReq = _reflection.GeneratedProtocolMessageType('TCertCRLReq', (_message.Message,), dict(
+ DESCRIPTOR = _TCERTCRLREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TCertCRLReq)
+ ))
+_sym_db.RegisterMessage(TCertCRLReq)
+
+TLSCertCreateReq = _reflection.GeneratedProtocolMessageType('TLSCertCreateReq', (_message.Message,), dict(
+ DESCRIPTOR = _TLSCERTCREATEREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TLSCertCreateReq)
+ ))
+_sym_db.RegisterMessage(TLSCertCreateReq)
+
+TLSCertCreateResp = _reflection.GeneratedProtocolMessageType('TLSCertCreateResp', (_message.Message,), dict(
+ DESCRIPTOR = _TLSCERTCREATERESP,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TLSCertCreateResp)
+ ))
+_sym_db.RegisterMessage(TLSCertCreateResp)
+
+TLSCertReadReq = _reflection.GeneratedProtocolMessageType('TLSCertReadReq', (_message.Message,), dict(
+ DESCRIPTOR = _TLSCERTREADREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TLSCertReadReq)
+ ))
+_sym_db.RegisterMessage(TLSCertReadReq)
+
+TLSCertRevokeReq = _reflection.GeneratedProtocolMessageType('TLSCertRevokeReq', (_message.Message,), dict(
+ DESCRIPTOR = _TLSCERTREVOKEREQ,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TLSCertRevokeReq)
+ ))
+_sym_db.RegisterMessage(TLSCertRevokeReq)
+
+Cert = _reflection.GeneratedProtocolMessageType('Cert', (_message.Message,), dict(
+ DESCRIPTOR = _CERT,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Cert)
+ ))
+_sym_db.RegisterMessage(Cert)
+
+TCert = _reflection.GeneratedProtocolMessageType('TCert', (_message.Message,), dict(
+
+ KeysEntry = _reflection.GeneratedProtocolMessageType('KeysEntry', (_message.Message,), dict(
+ DESCRIPTOR = _TCERT_KEYSENTRY,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TCert.KeysEntry)
+ ))
+ ,
+ DESCRIPTOR = _TCERT,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TCert)
+ ))
+_sym_db.RegisterMessage(TCert)
+_sym_db.RegisterMessage(TCert.KeysEntry)
+
+CertSet = _reflection.GeneratedProtocolMessageType('CertSet', (_message.Message,), dict(
+ DESCRIPTOR = _CERTSET,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.CertSet)
+ ))
+_sym_db.RegisterMessage(CertSet)
+
+CertSets = _reflection.GeneratedProtocolMessageType('CertSets', (_message.Message,), dict(
+ DESCRIPTOR = _CERTSETS,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.CertSets)
+ ))
+_sym_db.RegisterMessage(CertSets)
+
+CertPair = _reflection.GeneratedProtocolMessageType('CertPair', (_message.Message,), dict(
+ DESCRIPTOR = _CERTPAIR,
+ __module__ = 'ca_pb2'
+ # @@protoc_insertion_point(class_scope:protos.CertPair)
+ ))
+_sym_db.RegisterMessage(CertPair)
+
+
+_TCERT_KEYSENTRY.has_options = True
+_TCERT_KEYSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
+import abc
+import six
+from grpc.beta import implementations as beta_implementations
+from grpc.beta import interfaces as beta_interfaces
+from grpc.framework.common import cardinality
+from grpc.framework.interfaces.face import utilities as face_utilities
+
+class BetaECAPServicer(object):
+ """Enrollment Certificate Authority (ECA).
+
+ public service
+ """
+ def ReadCACertificate(self, request, context):
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def CreateCertificatePair(self, request, context):
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def ReadCertificatePair(self, request, context):
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def ReadCertificateByHash(self, request, context):
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def RevokeCertificatePair(self, request, context):
+ """a user can revoke only his/her own cert
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+
+class BetaECAPStub(object):
+ """Enrollment Certificate Authority (ECA).
+
+ public service
+ """
+ def ReadCACertificate(self, request, timeout):
+ raise NotImplementedError()
+ ReadCACertificate.future = None
+ def CreateCertificatePair(self, request, timeout):
+ raise NotImplementedError()
+ CreateCertificatePair.future = None
+ def ReadCertificatePair(self, request, timeout):
+ raise NotImplementedError()
+ ReadCertificatePair.future = None
+ def ReadCertificateByHash(self, request, timeout):
+ raise NotImplementedError()
+ ReadCertificateByHash.future = None
+ def RevokeCertificatePair(self, request, timeout):
+ """a user can revoke only his/her own cert
+ """
+ raise NotImplementedError()
+ RevokeCertificatePair.future = None
+
+def beta_create_ECAP_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ request_deserializers = {
+ ('protos.ECAP', 'CreateCertificatePair'): ca_pb2.ECertCreateReq.FromString,
+ ('protos.ECAP', 'ReadCACertificate'): ca_pb2.Empty.FromString,
+ ('protos.ECAP', 'ReadCertificateByHash'): ca_pb2.Hash.FromString,
+ ('protos.ECAP', 'ReadCertificatePair'): ca_pb2.ECertReadReq.FromString,
+ ('protos.ECAP', 'RevokeCertificatePair'): ca_pb2.ECertRevokeReq.FromString,
+ }
+ response_serializers = {
+ ('protos.ECAP', 'CreateCertificatePair'): ca_pb2.ECertCreateResp.SerializeToString,
+ ('protos.ECAP', 'ReadCACertificate'): ca_pb2.Cert.SerializeToString,
+ ('protos.ECAP', 'ReadCertificateByHash'): ca_pb2.Cert.SerializeToString,
+ ('protos.ECAP', 'ReadCertificatePair'): ca_pb2.CertPair.SerializeToString,
+ ('protos.ECAP', 'RevokeCertificatePair'): ca_pb2.CAStatus.SerializeToString,
+ }
+ method_implementations = {
+ ('protos.ECAP', 'CreateCertificatePair'): face_utilities.unary_unary_inline(servicer.CreateCertificatePair),
+ ('protos.ECAP', 'ReadCACertificate'): face_utilities.unary_unary_inline(servicer.ReadCACertificate),
+ ('protos.ECAP', 'ReadCertificateByHash'): face_utilities.unary_unary_inline(servicer.ReadCertificateByHash),
+ ('protos.ECAP', 'ReadCertificatePair'): face_utilities.unary_unary_inline(servicer.ReadCertificatePair),
+ ('protos.ECAP', 'RevokeCertificatePair'): face_utilities.unary_unary_inline(servicer.RevokeCertificatePair),
+ }
+ server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+ return beta_implementations.server(method_implementations, options=server_options)
+
+def beta_create_ECAP_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ request_serializers = {
+ ('protos.ECAP', 'CreateCertificatePair'): ca_pb2.ECertCreateReq.SerializeToString,
+ ('protos.ECAP', 'ReadCACertificate'): ca_pb2.Empty.SerializeToString,
+ ('protos.ECAP', 'ReadCertificateByHash'): ca_pb2.Hash.SerializeToString,
+ ('protos.ECAP', 'ReadCertificatePair'): ca_pb2.ECertReadReq.SerializeToString,
+ ('protos.ECAP', 'RevokeCertificatePair'): ca_pb2.ECertRevokeReq.SerializeToString,
+ }
+ response_deserializers = {
+ ('protos.ECAP', 'CreateCertificatePair'): ca_pb2.ECertCreateResp.FromString,
+ ('protos.ECAP', 'ReadCACertificate'): ca_pb2.Cert.FromString,
+ ('protos.ECAP', 'ReadCertificateByHash'): ca_pb2.Cert.FromString,
+ ('protos.ECAP', 'ReadCertificatePair'): ca_pb2.CertPair.FromString,
+ ('protos.ECAP', 'RevokeCertificatePair'): ca_pb2.CAStatus.FromString,
+ }
+ cardinalities = {
+ 'CreateCertificatePair': cardinality.Cardinality.UNARY_UNARY,
+ 'ReadCACertificate': cardinality.Cardinality.UNARY_UNARY,
+ 'ReadCertificateByHash': cardinality.Cardinality.UNARY_UNARY,
+ 'ReadCertificatePair': cardinality.Cardinality.UNARY_UNARY,
+ 'RevokeCertificatePair': cardinality.Cardinality.UNARY_UNARY,
+ }
+ stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+ return beta_implementations.dynamic_stub(channel, 'protos.ECAP', cardinalities, options=stub_options)
+
+class BetaECAAServicer(object):
+ """admin service
+ """
+ def RegisterUser(self, request, context):
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def ReadUserSet(self, request, context):
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def RevokeCertificate(self, request, context):
+ """an admin can revoke any cert
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def PublishCRL(self, request, context):
+ """publishes CRL in the blockchain
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+
+class BetaECAAStub(object):
+ """admin service
+ """
+ def RegisterUser(self, request, timeout):
+ raise NotImplementedError()
+ RegisterUser.future = None
+ def ReadUserSet(self, request, timeout):
+ raise NotImplementedError()
+ ReadUserSet.future = None
+ def RevokeCertificate(self, request, timeout):
+ """an admin can revoke any cert
+ """
+ raise NotImplementedError()
+ RevokeCertificate.future = None
+ def PublishCRL(self, request, timeout):
+ """publishes CRL in the blockchain
+ """
+ raise NotImplementedError()
+ PublishCRL.future = None
+
+def beta_create_ECAA_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ request_deserializers = {
+ ('protos.ECAA', 'PublishCRL'): ca_pb2.ECertCRLReq.FromString,
+ ('protos.ECAA', 'ReadUserSet'): ca_pb2.ReadUserSetReq.FromString,
+ ('protos.ECAA', 'RegisterUser'): ca_pb2.RegisterUserReq.FromString,
+ ('protos.ECAA', 'RevokeCertificate'): ca_pb2.ECertRevokeReq.FromString,
+ }
+ response_serializers = {
+ ('protos.ECAA', 'PublishCRL'): ca_pb2.CAStatus.SerializeToString,
+ ('protos.ECAA', 'ReadUserSet'): ca_pb2.UserSet.SerializeToString,
+ ('protos.ECAA', 'RegisterUser'): ca_pb2.Token.SerializeToString,
+ ('protos.ECAA', 'RevokeCertificate'): ca_pb2.CAStatus.SerializeToString,
+ }
+ method_implementations = {
+ ('protos.ECAA', 'PublishCRL'): face_utilities.unary_unary_inline(servicer.PublishCRL),
+ ('protos.ECAA', 'ReadUserSet'): face_utilities.unary_unary_inline(servicer.ReadUserSet),
+ ('protos.ECAA', 'RegisterUser'): face_utilities.unary_unary_inline(servicer.RegisterUser),
+ ('protos.ECAA', 'RevokeCertificate'): face_utilities.unary_unary_inline(servicer.RevokeCertificate),
+ }
+ server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+ return beta_implementations.server(method_implementations, options=server_options)
+
+def beta_create_ECAA_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ request_serializers = {
+ ('protos.ECAA', 'PublishCRL'): ca_pb2.ECertCRLReq.SerializeToString,
+ ('protos.ECAA', 'ReadUserSet'): ca_pb2.ReadUserSetReq.SerializeToString,
+ ('protos.ECAA', 'RegisterUser'): ca_pb2.RegisterUserReq.SerializeToString,
+ ('protos.ECAA', 'RevokeCertificate'): ca_pb2.ECertRevokeReq.SerializeToString,
+ }
+ response_deserializers = {
+ ('protos.ECAA', 'PublishCRL'): ca_pb2.CAStatus.FromString,
+ ('protos.ECAA', 'ReadUserSet'): ca_pb2.UserSet.FromString,
+ ('protos.ECAA', 'RegisterUser'): ca_pb2.Token.FromString,
+ ('protos.ECAA', 'RevokeCertificate'): ca_pb2.CAStatus.FromString,
+ }
+ cardinalities = {
+ 'PublishCRL': cardinality.Cardinality.UNARY_UNARY,
+ 'ReadUserSet': cardinality.Cardinality.UNARY_UNARY,
+ 'RegisterUser': cardinality.Cardinality.UNARY_UNARY,
+ 'RevokeCertificate': cardinality.Cardinality.UNARY_UNARY,
+ }
+ stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+ return beta_implementations.dynamic_stub(channel, 'protos.ECAA', cardinalities, options=stub_options)
+
+class BetaTCAPServicer(object):
+ """Transaction Certificate Authority (TCA).
+
+ public service
+ """
+ def ReadCACertificate(self, request, context):
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def CreateCertificateSet(self, request, context):
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def ReadCertificate(self, request, context):
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def ReadCertificateSet(self, request, context):
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def RevokeCertificate(self, request, context):
+ """a user can revoke only his/her cert
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def RevokeCertificateSet(self, request, context):
+ """a user can revoke only his/her certs
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+
+class BetaTCAPStub(object):
+ """Transaction Certificate Authority (TCA).
+
+ public service
+ """
+ def ReadCACertificate(self, request, timeout):
+ raise NotImplementedError()
+ ReadCACertificate.future = None
+ def CreateCertificateSet(self, request, timeout):
+ raise NotImplementedError()
+ CreateCertificateSet.future = None
+ def ReadCertificate(self, request, timeout):
+ raise NotImplementedError()
+ ReadCertificate.future = None
+ def ReadCertificateSet(self, request, timeout):
+ raise NotImplementedError()
+ ReadCertificateSet.future = None
+ def RevokeCertificate(self, request, timeout):
+ """a user can revoke only his/her cert
+ """
+ raise NotImplementedError()
+ RevokeCertificate.future = None
+ def RevokeCertificateSet(self, request, timeout):
+ """a user can revoke only his/her certs
+ """
+ raise NotImplementedError()
+ RevokeCertificateSet.future = None
+
+def beta_create_TCAP_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ request_deserializers = {
+ ('protos.TCAP', 'CreateCertificateSet'): ca_pb2.TCertCreateSetReq.FromString,
+ ('protos.TCAP', 'ReadCACertificate'): ca_pb2.Empty.FromString,
+ ('protos.TCAP', 'ReadCertificate'): ca_pb2.TCertReadReq.FromString,
+ ('protos.TCAP', 'ReadCertificateSet'): ca_pb2.TCertReadSetReq.FromString,
+ ('protos.TCAP', 'RevokeCertificate'): ca_pb2.TCertRevokeReq.FromString,
+ ('protos.TCAP', 'RevokeCertificateSet'): ca_pb2.TCertRevokeSetReq.FromString,
+ }
+ response_serializers = {
+ ('protos.TCAP', 'CreateCertificateSet'): ca_pb2.TCertCreateSetResp.SerializeToString,
+ ('protos.TCAP', 'ReadCACertificate'): ca_pb2.Cert.SerializeToString,
+ ('protos.TCAP', 'ReadCertificate'): ca_pb2.Cert.SerializeToString,
+ ('protos.TCAP', 'ReadCertificateSet'): ca_pb2.CertSet.SerializeToString,
+ ('protos.TCAP', 'RevokeCertificate'): ca_pb2.CAStatus.SerializeToString,
+ ('protos.TCAP', 'RevokeCertificateSet'): ca_pb2.CAStatus.SerializeToString,
+ }
+ method_implementations = {
+ ('protos.TCAP', 'CreateCertificateSet'): face_utilities.unary_unary_inline(servicer.CreateCertificateSet),
+ ('protos.TCAP', 'ReadCACertificate'): face_utilities.unary_unary_inline(servicer.ReadCACertificate),
+ ('protos.TCAP', 'ReadCertificate'): face_utilities.unary_unary_inline(servicer.ReadCertificate),
+ ('protos.TCAP', 'ReadCertificateSet'): face_utilities.unary_unary_inline(servicer.ReadCertificateSet),
+ ('protos.TCAP', 'RevokeCertificate'): face_utilities.unary_unary_inline(servicer.RevokeCertificate),
+ ('protos.TCAP', 'RevokeCertificateSet'): face_utilities.unary_unary_inline(servicer.RevokeCertificateSet),
+ }
+ server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+ return beta_implementations.server(method_implementations, options=server_options)
+
+def beta_create_TCAP_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ request_serializers = {
+ ('protos.TCAP', 'CreateCertificateSet'): ca_pb2.TCertCreateSetReq.SerializeToString,
+ ('protos.TCAP', 'ReadCACertificate'): ca_pb2.Empty.SerializeToString,
+ ('protos.TCAP', 'ReadCertificate'): ca_pb2.TCertReadReq.SerializeToString,
+ ('protos.TCAP', 'ReadCertificateSet'): ca_pb2.TCertReadSetReq.SerializeToString,
+ ('protos.TCAP', 'RevokeCertificate'): ca_pb2.TCertRevokeReq.SerializeToString,
+ ('protos.TCAP', 'RevokeCertificateSet'): ca_pb2.TCertRevokeSetReq.SerializeToString,
+ }
+ response_deserializers = {
+ ('protos.TCAP', 'CreateCertificateSet'): ca_pb2.TCertCreateSetResp.FromString,
+ ('protos.TCAP', 'ReadCACertificate'): ca_pb2.Cert.FromString,
+ ('protos.TCAP', 'ReadCertificate'): ca_pb2.Cert.FromString,
+ ('protos.TCAP', 'ReadCertificateSet'): ca_pb2.CertSet.FromString,
+ ('protos.TCAP', 'RevokeCertificate'): ca_pb2.CAStatus.FromString,
+ ('protos.TCAP', 'RevokeCertificateSet'): ca_pb2.CAStatus.FromString,
+ }
+ cardinalities = {
+ 'CreateCertificateSet': cardinality.Cardinality.UNARY_UNARY,
+ 'ReadCACertificate': cardinality.Cardinality.UNARY_UNARY,
+ 'ReadCertificate': cardinality.Cardinality.UNARY_UNARY,
+ 'ReadCertificateSet': cardinality.Cardinality.UNARY_UNARY,
+ 'RevokeCertificate': cardinality.Cardinality.UNARY_UNARY,
+ 'RevokeCertificateSet': cardinality.Cardinality.UNARY_UNARY,
+ }
+ stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+ return beta_implementations.dynamic_stub(channel, 'protos.TCAP', cardinalities, options=stub_options)
+
+class BetaTCAAServicer(object):
+ """admin service
+ """
+ def ReadCertificateSets(self, request, context):
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def RevokeCertificate(self, request, context):
+ """an admin can revoke any cert
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def RevokeCertificateSet(self, request, context):
+ """an admin can revoke any cert
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def PublishCRL(self, request, context):
+ """publishes CRL in the blockchain
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+
+class BetaTCAAStub(object):
+ """admin service
+ """
+ def ReadCertificateSets(self, request, timeout):
+ raise NotImplementedError()
+ ReadCertificateSets.future = None
+ def RevokeCertificate(self, request, timeout):
+ """an admin can revoke any cert
+ """
+ raise NotImplementedError()
+ RevokeCertificate.future = None
+ def RevokeCertificateSet(self, request, timeout):
+ """an admin can revoke any cert
+ """
+ raise NotImplementedError()
+ RevokeCertificateSet.future = None
+ def PublishCRL(self, request, timeout):
+ """publishes CRL in the blockchain
+ """
+ raise NotImplementedError()
+ PublishCRL.future = None
+
+def beta_create_TCAA_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ request_deserializers = {
+ ('protos.TCAA', 'PublishCRL'): ca_pb2.TCertCRLReq.FromString,
+ ('protos.TCAA', 'ReadCertificateSets'): ca_pb2.TCertReadSetsReq.FromString,
+ ('protos.TCAA', 'RevokeCertificate'): ca_pb2.TCertRevokeReq.FromString,
+ ('protos.TCAA', 'RevokeCertificateSet'): ca_pb2.TCertRevokeSetReq.FromString,
+ }
+ response_serializers = {
+ ('protos.TCAA', 'PublishCRL'): ca_pb2.CAStatus.SerializeToString,
+ ('protos.TCAA', 'ReadCertificateSets'): ca_pb2.CertSets.SerializeToString,
+ ('protos.TCAA', 'RevokeCertificate'): ca_pb2.CAStatus.SerializeToString,
+ ('protos.TCAA', 'RevokeCertificateSet'): ca_pb2.CAStatus.SerializeToString,
+ }
+ method_implementations = {
+ ('protos.TCAA', 'PublishCRL'): face_utilities.unary_unary_inline(servicer.PublishCRL),
+ ('protos.TCAA', 'ReadCertificateSets'): face_utilities.unary_unary_inline(servicer.ReadCertificateSets),
+ ('protos.TCAA', 'RevokeCertificate'): face_utilities.unary_unary_inline(servicer.RevokeCertificate),
+ ('protos.TCAA', 'RevokeCertificateSet'): face_utilities.unary_unary_inline(servicer.RevokeCertificateSet),
+ }
+ server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+ return beta_implementations.server(method_implementations, options=server_options)
+
+def beta_create_TCAA_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ request_serializers = {
+ ('protos.TCAA', 'PublishCRL'): ca_pb2.TCertCRLReq.SerializeToString,
+ ('protos.TCAA', 'ReadCertificateSets'): ca_pb2.TCertReadSetsReq.SerializeToString,
+ ('protos.TCAA', 'RevokeCertificate'): ca_pb2.TCertRevokeReq.SerializeToString,
+ ('protos.TCAA', 'RevokeCertificateSet'): ca_pb2.TCertRevokeSetReq.SerializeToString,
+ }
+ response_deserializers = {
+ ('protos.TCAA', 'PublishCRL'): ca_pb2.CAStatus.FromString,
+ ('protos.TCAA', 'ReadCertificateSets'): ca_pb2.CertSets.FromString,
+ ('protos.TCAA', 'RevokeCertificate'): ca_pb2.CAStatus.FromString,
+ ('protos.TCAA', 'RevokeCertificateSet'): ca_pb2.CAStatus.FromString,
+ }
+ cardinalities = {
+ 'PublishCRL': cardinality.Cardinality.UNARY_UNARY,
+ 'ReadCertificateSets': cardinality.Cardinality.UNARY_UNARY,
+ 'RevokeCertificate': cardinality.Cardinality.UNARY_UNARY,
+ 'RevokeCertificateSet': cardinality.Cardinality.UNARY_UNARY,
+ }
+ stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+ return beta_implementations.dynamic_stub(channel, 'protos.TCAA', cardinalities, options=stub_options)
+
+class BetaTLSCAPServicer(object):
+  """TLS Certificate Authority (TLSCA)
+
+  public service
+
+  Base servicer for the TLSCA public RPCs (gRPC python beta API, generated
+  from ca.proto -- regenerate rather than hand-editing).  Every default
+  method only marks the RPC UNIMPLEMENTED on the servicer context;
+  subclasses override them with real handlers.
+  """
+  def ReadCACertificate(self, request, context):
+    """Serve the CA's certificate (request: Empty, response: Cert)."""
+    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+  def CreateCertificate(self, request, context):
+    """Issue a TLS cert (request: TLSCertCreateReq, response: TLSCertCreateResp)."""
+    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+  def ReadCertificate(self, request, context):
+    """Fetch a TLS cert (request: TLSCertReadReq, response: Cert)."""
+    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+  def RevokeCertificate(self, request, context):
+    """a user can revoke only his/her cert
+
+    request: TLSCertRevokeReq, response: CAStatus.
+    """
+    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+
+class BetaTLSCAPStub(object):
+  """TLS Certificate Authority (TLSCA)
+
+  public service
+
+  Interface-only client stub for the TLSCA public service.  Working stubs
+  are produced by beta_create_TLSCAP_stub; here every method raises, and
+  each also carries a ``.future`` attribute for the asynchronous form.
+  """
+  def ReadCACertificate(self, request, timeout):
+    """Blocking unary-unary call: Empty -> Cert."""
+    raise NotImplementedError()
+  ReadCACertificate.future = None
+  def CreateCertificate(self, request, timeout):
+    """Blocking unary-unary call: TLSCertCreateReq -> TLSCertCreateResp."""
+    raise NotImplementedError()
+  CreateCertificate.future = None
+  def ReadCertificate(self, request, timeout):
+    """Blocking unary-unary call: TLSCertReadReq -> Cert."""
+    raise NotImplementedError()
+  ReadCertificate.future = None
+  def RevokeCertificate(self, request, timeout):
+    """a user can revoke only his/her cert
+
+    Blocking unary-unary call: TLSCertRevokeReq -> CAStatus.
+    """
+    raise NotImplementedError()
+  RevokeCertificate.future = None
+
+def beta_create_TLSCAP_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ request_deserializers = {
+ ('protos.TLSCAP', 'CreateCertificate'): ca_pb2.TLSCertCreateReq.FromString,
+ ('protos.TLSCAP', 'ReadCACertificate'): ca_pb2.Empty.FromString,
+ ('protos.TLSCAP', 'ReadCertificate'): ca_pb2.TLSCertReadReq.FromString,
+ ('protos.TLSCAP', 'RevokeCertificate'): ca_pb2.TLSCertRevokeReq.FromString,
+ }
+ response_serializers = {
+ ('protos.TLSCAP', 'CreateCertificate'): ca_pb2.TLSCertCreateResp.SerializeToString,
+ ('protos.TLSCAP', 'ReadCACertificate'): ca_pb2.Cert.SerializeToString,
+ ('protos.TLSCAP', 'ReadCertificate'): ca_pb2.Cert.SerializeToString,
+ ('protos.TLSCAP', 'RevokeCertificate'): ca_pb2.CAStatus.SerializeToString,
+ }
+ method_implementations = {
+ ('protos.TLSCAP', 'CreateCertificate'): face_utilities.unary_unary_inline(servicer.CreateCertificate),
+ ('protos.TLSCAP', 'ReadCACertificate'): face_utilities.unary_unary_inline(servicer.ReadCACertificate),
+ ('protos.TLSCAP', 'ReadCertificate'): face_utilities.unary_unary_inline(servicer.ReadCertificate),
+ ('protos.TLSCAP', 'RevokeCertificate'): face_utilities.unary_unary_inline(servicer.RevokeCertificate),
+ }
+ server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+ return beta_implementations.server(method_implementations, options=server_options)
+
+def beta_create_TLSCAP_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ import ca_pb2
+ request_serializers = {
+ ('protos.TLSCAP', 'CreateCertificate'): ca_pb2.TLSCertCreateReq.SerializeToString,
+ ('protos.TLSCAP', 'ReadCACertificate'): ca_pb2.Empty.SerializeToString,
+ ('protos.TLSCAP', 'ReadCertificate'): ca_pb2.TLSCertReadReq.SerializeToString,
+ ('protos.TLSCAP', 'RevokeCertificate'): ca_pb2.TLSCertRevokeReq.SerializeToString,
+ }
+ response_deserializers = {
+ ('protos.TLSCAP', 'CreateCertificate'): ca_pb2.TLSCertCreateResp.FromString,
+ ('protos.TLSCAP', 'ReadCACertificate'): ca_pb2.Cert.FromString,
+ ('protos.TLSCAP', 'ReadCertificate'): ca_pb2.Cert.FromString,
+ ('protos.TLSCAP', 'RevokeCertificate'): ca_pb2.CAStatus.FromString,
+ }
+ cardinalities = {
+ 'CreateCertificate': cardinality.Cardinality.UNARY_UNARY,
+ 'ReadCACertificate': cardinality.Cardinality.UNARY_UNARY,
+ 'ReadCertificate': cardinality.Cardinality.UNARY_UNARY,
+ 'RevokeCertificate': cardinality.Cardinality.UNARY_UNARY,
+ }
+ stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+ return beta_implementations.dynamic_stub(channel, 'protos.TLSCAP', cardinalities, options=stub_options)
+
+class BetaTLSCAAServicer(object):
+  """admin service
+
+  Base servicer for the TLSCA admin RPCs (gRPC python beta API, generated
+  from ca.proto -- regenerate rather than hand-editing).  Subclasses
+  override each method with a real implementation.
+  """
+  def RevokeCertificate(self, request, context):
+    """an admin can revoke any cert
+
+    Default implementation only marks the RPC UNIMPLEMENTED on the
+    servicer context (request: TLSCertRevokeReq, response: CAStatus).
+    """
+    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+
+class BetaTLSCAAStub(object):
+  """admin service
+
+  Interface-only client stub for the TLSCA admin service.  Working stubs
+  are produced by beta_create_TLSCAA_stub; here the method raises, and it
+  also carries a ``.future`` attribute for the asynchronous form.
+  """
+  def RevokeCertificate(self, request, timeout):
+    """an admin can revoke any cert
+
+    Blocking unary-unary call: TLSCertRevokeReq -> CAStatus.
+    """
+    raise NotImplementedError()
+  RevokeCertificate.future = None
+
+def beta_create_TLSCAA_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+ import ca_pb2
+ import ca_pb2
+ request_deserializers = {
+ ('protos.TLSCAA', 'RevokeCertificate'): ca_pb2.TLSCertRevokeReq.FromString,
+ }
+ response_serializers = {
+ ('protos.TLSCAA', 'RevokeCertificate'): ca_pb2.CAStatus.SerializeToString,
+ }
+ method_implementations = {
+ ('protos.TLSCAA', 'RevokeCertificate'): face_utilities.unary_unary_inline(servicer.RevokeCertificate),
+ }
+ server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+ return beta_implementations.server(method_implementations, options=server_options)
+
+def beta_create_TLSCAA_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+ import ca_pb2
+ import ca_pb2
+ request_serializers = {
+ ('protos.TLSCAA', 'RevokeCertificate'): ca_pb2.TLSCertRevokeReq.SerializeToString,
+ }
+ response_deserializers = {
+ ('protos.TLSCAA', 'RevokeCertificate'): ca_pb2.CAStatus.FromString,
+ }
+ cardinalities = {
+ 'RevokeCertificate': cardinality.Cardinality.UNARY_UNARY,
+ }
+ stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+ return beta_implementations.dynamic_stub(channel, 'protos.TLSCAA', cardinalities, options=stub_options)
+# @@protoc_insertion_point(module_scope)
diff --git a/bddtests/chaincode/go/table/table.go b/bddtests/chaincode/go/table/table.go
new file mode 100644
index 00000000000..f489f12c62a
--- /dev/null
+++ b/bddtests/chaincode/go/table/table.go
@@ -0,0 +1,537 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
+
+ "github.com/hyperledger/fabric/core/chaincode/shim"
+)
+
+// SimpleChaincode example simple Chaincode implementation
+// The receiver is stateless; all data lives in the ledger tables
+// created by Init (tableOne..tableFour).
+type SimpleChaincode struct {
+}
+
+// Init create tables for tests
+// It creates tableOne, tableTwo, tableThree and tableFour in order and
+// aborts with a wrapped error on the first failure, so a partially
+// created set of tables is possible if a later create fails.
+// Returns (nil, nil) on success; the args parameter is unused.
+func (t *SimpleChaincode) Init(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {
+	// Create table one
+	err := createTableOne(stub)
+	if err != nil {
+		return nil, fmt.Errorf("Error creating table one during init. %s", err)
+	}
+
+	// Create table two
+	err = createTableTwo(stub)
+	if err != nil {
+		return nil, fmt.Errorf("Error creating table two during init. %s", err)
+	}
+
+	// Create table three
+	err = createTableThree(stub)
+	if err != nil {
+		return nil, fmt.Errorf("Error creating table three during init. %s", err)
+	}
+
+	// Create table four
+	err = createTableFour(stub)
+	if err != nil {
+		return nil, fmt.Errorf("Error creating table four during init. %s", err)
+	}
+
+	return nil, nil
+}
+
+// Invoke callback representing the invocation of a chaincode
+// This chaincode will manage two accounts A and B and will transfer X units from A to B upon invoke
+// NOTE(review): the line above is copied from the example02 chaincode and
+// does not describe this implementation — Invoke dispatches table
+// insert/delete/replace operations on tableOne..tableFour. Each case
+// parses positional string args into typed column values, performs the
+// operation, and falls through to the final "return nil, nil" on success.
+func (t *SimpleChaincode) Invoke(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {
+
+	switch function {
+
+	// Insert into tableOne: args = [string key, int32, int32].
+	case "insertRowTableOne":
+		if len(args) < 3 {
+			return nil, errors.New("insertTableOne failed. Must include 3 column values")
+		}
+
+		col1Val := args[0]
+		col2Int, err := strconv.ParseInt(args[1], 10, 32)
+		if err != nil {
+			return nil, errors.New("insertTableOne failed. arg[1] must be convertable to int32")
+		}
+		col2Val := int32(col2Int)
+		col3Int, err := strconv.ParseInt(args[2], 10, 32)
+		if err != nil {
+			return nil, errors.New("insertTableOne failed. arg[2] must be convertable to int32")
+		}
+		col3Val := int32(col3Int)
+
+		var columns []*shim.Column
+		col1 := shim.Column{Value: &shim.Column_String_{String_: col1Val}}
+		col2 := shim.Column{Value: &shim.Column_Int32{Int32: col2Val}}
+		col3 := shim.Column{Value: &shim.Column_Int32{Int32: col3Val}}
+		columns = append(columns, &col1)
+		columns = append(columns, &col2)
+		columns = append(columns, &col3)
+
+		row := shim.Row{Columns: columns}
+		// InsertRow returns ok=false (no error) when the key already exists.
+		ok, err := stub.InsertRow("tableOne", row)
+		if err != nil {
+			return nil, fmt.Errorf("insertTableOne operation failed. %s", err)
+		}
+		if !ok {
+			return nil, errors.New("insertTableOne operation failed. Row with given key already exists")
+		}
+
+	// Insert into tableTwo: args = [string, int32, int32, string].
+	case "insertRowTableTwo":
+		if len(args) < 4 {
+			return nil, errors.New("insertRowTableTwo failed. Must include 4 column values")
+		}
+
+		col1Val := args[0]
+		col2Int, err := strconv.ParseInt(args[1], 10, 32)
+		if err != nil {
+			return nil, errors.New("insertRowTableTwo failed. arg[1] must be convertable to int32")
+		}
+		col2Val := int32(col2Int)
+		col3Int, err := strconv.ParseInt(args[2], 10, 32)
+		if err != nil {
+			return nil, errors.New("insertRowTableTwo failed. arg[2] must be convertable to int32")
+		}
+		col3Val := int32(col3Int)
+		col4Val := args[3]
+
+		var columns []*shim.Column
+		col1 := shim.Column{Value: &shim.Column_String_{String_: col1Val}}
+		col2 := shim.Column{Value: &shim.Column_Int32{Int32: col2Val}}
+		col3 := shim.Column{Value: &shim.Column_Int32{Int32: col3Val}}
+		col4 := shim.Column{Value: &shim.Column_String_{String_: col4Val}}
+		columns = append(columns, &col1)
+		columns = append(columns, &col2)
+		columns = append(columns, &col3)
+		columns = append(columns, &col4)
+
+		row := shim.Row{Columns: columns}
+		ok, err := stub.InsertRow("tableTwo", row)
+		if err != nil {
+			return nil, fmt.Errorf("insertRowTableTwo operation failed. %s", err)
+		}
+		if !ok {
+			return nil, errors.New("insertRowTableTwo operation failed. Row with given key already exists")
+		}
+
+	// Insert into tableThree: exercises every supported column type —
+	// args = [string, int32, int64, uint32, uint64, bytes, bool].
+	case "insertRowTableThree":
+		if len(args) < 7 {
+			return nil, errors.New("insertRowTableThree failed. Must include 7 column values")
+		}
+
+		col1Val := args[0]
+
+		col2Int, err := strconv.ParseInt(args[1], 10, 32)
+		if err != nil {
+			return nil, errors.New("insertRowTableThree failed. arg[1] must be convertable to int32")
+		}
+		col2Val := int32(col2Int)
+
+		col3Val, err := strconv.ParseInt(args[2], 10, 64)
+		if err != nil {
+			return nil, errors.New("insertRowTableThree failed. arg[2] must be convertable to int64")
+		}
+
+		col4Uint, err := strconv.ParseUint(args[3], 10, 32)
+		if err != nil {
+			return nil, errors.New("insertRowTableThree failed. arg[3] must be convertable to uint32")
+		}
+		col4Val := uint32(col4Uint)
+
+		col5Val, err := strconv.ParseUint(args[4], 10, 64)
+		if err != nil {
+			return nil, errors.New("insertRowTableThree failed. arg[4] must be convertable to uint64")
+		}
+
+		// arg[5] is stored verbatim as raw bytes.
+		col6Val := []byte(args[5])
+
+		col7Val, err := strconv.ParseBool(args[6])
+		if err != nil {
+			return nil, errors.New("insertRowTableThree failed. arg[6] must be convertable to bool")
+		}
+
+		var columns []*shim.Column
+		col1 := shim.Column{Value: &shim.Column_String_{String_: col1Val}}
+		col2 := shim.Column{Value: &shim.Column_Int32{Int32: col2Val}}
+		col3 := shim.Column{Value: &shim.Column_Int64{Int64: col3Val}}
+		col4 := shim.Column{Value: &shim.Column_Uint32{Uint32: col4Val}}
+		col5 := shim.Column{Value: &shim.Column_Uint64{Uint64: col5Val}}
+		col6 := shim.Column{Value: &shim.Column_Bytes{Bytes: col6Val}}
+		col7 := shim.Column{Value: &shim.Column_Bool{Bool: col7Val}}
+		columns = append(columns, &col1)
+		columns = append(columns, &col2)
+		columns = append(columns, &col3)
+		columns = append(columns, &col4)
+		columns = append(columns, &col5)
+		columns = append(columns, &col6)
+		columns = append(columns, &col7)
+
+		row := shim.Row{Columns: columns}
+		ok, err := stub.InsertRow("tableThree", row)
+		if err != nil {
+			return nil, fmt.Errorf("insertRowTableThree operation failed. %s", err)
+		}
+		if !ok {
+			return nil, errors.New("insertRowTableThree operation failed. Row with given key already exists")
+		}
+
+	// Insert into tableFour: args = [string key].
+	case "insertRowTableFour":
+		if len(args) < 1 {
+			// NOTE(review): "value1" is a typo for "value" in this message.
+			return nil, errors.New("insertRowTableFour failed. Must include 1 column value1")
+		}
+
+		col1Val := args[0]
+
+		var columns []*shim.Column
+		col1 := shim.Column{Value: &shim.Column_String_{String_: col1Val}}
+		columns = append(columns, &col1)
+
+		row := shim.Row{Columns: columns}
+		ok, err := stub.InsertRow("tableFour", row)
+		if err != nil {
+			return nil, fmt.Errorf("insertRowTableFour operation failed. %s", err)
+		}
+		if !ok {
+			return nil, errors.New("insertRowTableFour operation failed. Row with given key already exists")
+		}
+
+	// Delete a row from tableOne by its single key column.
+	case "deleteRowTableOne":
+		if len(args) < 1 {
+			return nil, errors.New("deleteRowTableOne failed. Must include 1 key value")
+		}
+
+		col1Val := args[0]
+		var columns []shim.Column
+		col1 := shim.Column{Value: &shim.Column_String_{String_: col1Val}}
+		columns = append(columns, col1)
+
+		err := stub.DeleteRow("tableOne", columns)
+		if err != nil {
+			return nil, fmt.Errorf("deleteRowTableOne operation failed. %s", err)
+		}
+
+	// Replace an existing tableOne row; fails if the key does not exist.
+	case "replaceRowTableOne":
+		if len(args) < 3 {
+			return nil, errors.New("replaceRowTableOne failed. Must include 3 column values")
+		}
+
+		col1Val := args[0]
+		col2Int, err := strconv.ParseInt(args[1], 10, 32)
+		if err != nil {
+			return nil, errors.New("replaceRowTableOne failed. arg[1] must be convertable to int32")
+		}
+		col2Val := int32(col2Int)
+		col3Int, err := strconv.ParseInt(args[2], 10, 32)
+		if err != nil {
+			return nil, errors.New("replaceRowTableOne failed. arg[2] must be convertable to int32")
+		}
+		col3Val := int32(col3Int)
+
+		var columns []*shim.Column
+		col1 := shim.Column{Value: &shim.Column_String_{String_: col1Val}}
+		col2 := shim.Column{Value: &shim.Column_Int32{Int32: col2Val}}
+		col3 := shim.Column{Value: &shim.Column_Int32{Int32: col3Val}}
+		columns = append(columns, &col1)
+		columns = append(columns, &col2)
+		columns = append(columns, &col3)
+
+		row := shim.Row{Columns: columns}
+		ok, err := stub.ReplaceRow("tableOne", row)
+		if err != nil {
+			return nil, fmt.Errorf("replaceRowTableOne operation failed. %s", err)
+		}
+		if !ok {
+			return nil, errors.New("replaceRowTableOne operation failed. Row with given key does not exist")
+		}
+
+	// Drop tableOne and recreate it empty (tests table lifecycle).
+	case "deleteAndRecreateTableOne":
+
+		err := stub.DeleteTable("tableOne")
+		if err != nil {
+			return nil, fmt.Errorf("deleteAndRecreateTableOne operation failed. Error deleting table. %s", err)
+		}
+
+		err = createTableOne(stub)
+		if err != nil {
+			return nil, fmt.Errorf("deleteAndRecreateTableOne operation failed. Error creating table. %s", err)
+		}
+
+		return nil, nil
+
+	default:
+		return nil, errors.New("Unsupported operation")
+	}
+	return nil, nil
+}
+
+// Query callback representing the query of a chaincode
+// Supported functions: getRowTableOne/Two/Three/Four (single row lookup,
+// returned via fmt.Sprintf("%s", row) — the proto text form) and
+// getRowsTableTwo/getRowsTableFour (partial-key scan, returned as JSON).
+func (t *SimpleChaincode) Query(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {
+	switch function {
+
+	// Fetch one tableOne row by its single string key.
+	case "getRowTableOne":
+		if len(args) < 1 {
+			return nil, errors.New("getRowTableOne failed. Must include 1 key value")
+		}
+
+		col1Val := args[0]
+		var columns []shim.Column
+		col1 := shim.Column{Value: &shim.Column_String_{String_: col1Val}}
+		columns = append(columns, col1)
+
+		row, err := stub.GetRow("tableOne", columns)
+		if err != nil {
+			return nil, fmt.Errorf("getRowTableOne operation failed. %s", err)
+		}
+
+		rowString := fmt.Sprintf("%s", row)
+		return []byte(rowString), nil
+
+	// Fetch one tableTwo row; its key is composite (cols 1, 2 and 4 of the
+	// table definition), so three args are required.
+	case "getRowTableTwo":
+		if len(args) < 3 {
+			return nil, errors.New("getRowTableTwo failed. Must include 3 key values")
+		}
+
+		col1Val := args[0]
+		col2Int, err := strconv.ParseInt(args[1], 10, 32)
+		if err != nil {
+			return nil, errors.New("getRowTableTwo failed. arg[1] must be convertable to int32")
+		}
+		col2Val := int32(col2Int)
+		col3Val := args[2]
+		var columns []shim.Column
+		col1 := shim.Column{Value: &shim.Column_String_{String_: col1Val}}
+		col2 := shim.Column{Value: &shim.Column_Int32{Int32: col2Val}}
+		col3 := shim.Column{Value: &shim.Column_String_{String_: col3Val}}
+		columns = append(columns, col1)
+		columns = append(columns, col2)
+		columns = append(columns, col3)
+
+		row, err := stub.GetRow("tableTwo", columns)
+		if err != nil {
+			return nil, fmt.Errorf("getRowTableTwo operation failed. %s", err)
+		}
+
+		rowString := fmt.Sprintf("%s", row)
+		return []byte(rowString), nil
+
+	// Fetch one tableThree row by its single string key.
+	case "getRowTableThree":
+		if len(args) < 1 {
+			return nil, errors.New("getRowTableThree failed. Must include 1 key value")
+		}
+
+		col1Val := args[0]
+
+		var columns []shim.Column
+		col1 := shim.Column{Value: &shim.Column_String_{String_: col1Val}}
+		columns = append(columns, col1)
+
+		row, err := stub.GetRow("tableThree", columns)
+		if err != nil {
+			return nil, fmt.Errorf("getRowTableThree operation failed. %s", err)
+		}
+
+		rowString := fmt.Sprintf("%s", row)
+		return []byte(rowString), nil
+
+	// Scan tableTwo by a partial key: arg[0] mandatory, arg[1] optional
+	// second key column. Results are streamed on a channel and returned
+	// as a JSON array of rows.
+	case "getRowsTableTwo":
+		if len(args) < 1 {
+			// NOTE(review): message is missing the count — should read
+			// "Must include at least 1 key value".
+			return nil, errors.New("getRowsTableTwo failed. Must include at least key values")
+		}
+
+		var columns []shim.Column
+
+		col1Val := args[0]
+		col1 := shim.Column{Value: &shim.Column_String_{String_: col1Val}}
+		columns = append(columns, col1)
+
+		if len(args) > 1 {
+			col2Int, err := strconv.ParseInt(args[1], 10, 32)
+			if err != nil {
+				return nil, errors.New("getRowsTableTwo failed. arg[1] must be convertable to int32")
+			}
+			col2Val := int32(col2Int)
+			col2 := shim.Column{Value: &shim.Column_Int32{Int32: col2Val}}
+			columns = append(columns, col2)
+		}
+
+		rowChannel, err := stub.GetRows("tableTwo", columns)
+		if err != nil {
+			return nil, fmt.Errorf("getRowsTableTwo operation failed. %s", err)
+		}
+
+		// Drain the channel until it is closed. NOTE(review): a single-case
+		// select inside a for loop is equivalent to the simpler
+		// "for row := range rowChannel".
+		var rows []shim.Row
+		for {
+			select {
+			case row, ok := <-rowChannel:
+				if !ok {
+					rowChannel = nil
+				} else {
+					rows = append(rows, row)
+				}
+			}
+			if rowChannel == nil {
+				break
+			}
+		}
+
+		jsonRows, err := json.Marshal(rows)
+		if err != nil {
+			return nil, fmt.Errorf("getRowsTableTwo operation failed. Error marshaling JSON: %s", err)
+		}
+
+		return jsonRows, nil
+
+	// Fetch one tableFour row by its single string key.
+	case "getRowTableFour":
+		if len(args) < 1 {
+			return nil, errors.New("getRowTableFour failed. Must include 1 key")
+		}
+
+		col1Val := args[0]
+		var columns []shim.Column
+		col1 := shim.Column{Value: &shim.Column_String_{String_: col1Val}}
+		columns = append(columns, col1)
+
+		row, err := stub.GetRow("tableFour", columns)
+		if err != nil {
+			return nil, fmt.Errorf("getRowTableFour operation failed. %s", err)
+		}
+
+		rowString := fmt.Sprintf("%s", row)
+		return []byte(rowString), nil
+
+	// Scan tableFour by its key column and return a JSON array of rows.
+	case "getRowsTableFour":
+		if len(args) < 1 {
+			return nil, errors.New("getRowsTableFour failed. Must include 1 key value")
+		}
+
+		var columns []shim.Column
+
+		col1Val := args[0]
+		col1 := shim.Column{Value: &shim.Column_String_{String_: col1Val}}
+		columns = append(columns, col1)
+
+		rowChannel, err := stub.GetRows("tableFour", columns)
+		if err != nil {
+			return nil, fmt.Errorf("getRowsTableFour operation failed. %s", err)
+		}
+
+		// Same channel-drain idiom as getRowsTableTwo (see note there).
+		var rows []shim.Row
+		for {
+			select {
+			case row, ok := <-rowChannel:
+				if !ok {
+					rowChannel = nil
+				} else {
+					rows = append(rows, row)
+				}
+			}
+			if rowChannel == nil {
+				break
+			}
+		}
+
+		jsonRows, err := json.Marshal(rows)
+		if err != nil {
+			return nil, fmt.Errorf("getRowsTableFour operation failed. Error marshaling JSON: %s", err)
+		}
+
+		return jsonRows, nil
+
+	default:
+		return nil, errors.New("Unsupported operation")
+	}
+}
+
+// main starts the chaincode in the shim; it blocks for the lifetime of
+// the chaincode and only prints (does not exit non-zero) on startup error.
+func main() {
+	err := shim.Start(new(SimpleChaincode))
+	if err != nil {
+		fmt.Printf("Error starting Simple chaincode: %s", err)
+	}
+}
+
+// createTableOne defines tableOne: (colOneTableOne STRING key,
+// colTwoTableOne INT32, colThreeTableOne INT32). Returns CreateTable's error.
+func createTableOne(stub *shim.ChaincodeStub) error {
+	// Create table one
+	var columnDefsTableOne []*shim.ColumnDefinition
+	columnOneTableOneDef := shim.ColumnDefinition{Name: "colOneTableOne",
+		Type: shim.ColumnDefinition_STRING, Key: true}
+	columnTwoTableOneDef := shim.ColumnDefinition{Name: "colTwoTableOne",
+		Type: shim.ColumnDefinition_INT32, Key: false}
+	columnThreeTableOneDef := shim.ColumnDefinition{Name: "colThreeTableOne",
+		Type: shim.ColumnDefinition_INT32, Key: false}
+	columnDefsTableOne = append(columnDefsTableOne, &columnOneTableOneDef)
+	columnDefsTableOne = append(columnDefsTableOne, &columnTwoTableOneDef)
+	columnDefsTableOne = append(columnDefsTableOne, &columnThreeTableOneDef)
+	return stub.CreateTable("tableOne", columnDefsTableOne)
+}
+
+// createTableTwo defines tableTwo with a composite key on columns 1, 3
+// and 4 (STRING, INT32, STRING); column 2 is a non-key INT32.
+// NOTE(review): the column names "colThreeTableThree" and
+// "colFourTableFour" look like copy-paste slips (expected
+// "colThreeTableTwo"/"colFourTableTwo"); behavior is unaffected, but the
+// bddtests may depend on the current names — do not rename casually.
+func createTableTwo(stub *shim.ChaincodeStub) error {
+	var columnDefsTableTwo []*shim.ColumnDefinition
+	columnOneTableTwoDef := shim.ColumnDefinition{Name: "colOneTableTwo",
+		Type: shim.ColumnDefinition_STRING, Key: true}
+	columnTwoTableTwoDef := shim.ColumnDefinition{Name: "colTwoTableTwo",
+		Type: shim.ColumnDefinition_INT32, Key: false}
+	columnThreeTableTwoDef := shim.ColumnDefinition{Name: "colThreeTableThree",
+		Type: shim.ColumnDefinition_INT32, Key: true}
+	columnFourTableTwoDef := shim.ColumnDefinition{Name: "colFourTableFour",
+		Type: shim.ColumnDefinition_STRING, Key: true}
+	columnDefsTableTwo = append(columnDefsTableTwo, &columnOneTableTwoDef)
+	columnDefsTableTwo = append(columnDefsTableTwo, &columnTwoTableTwoDef)
+	columnDefsTableTwo = append(columnDefsTableTwo, &columnThreeTableTwoDef)
+	columnDefsTableTwo = append(columnDefsTableTwo, &columnFourTableTwoDef)
+	return stub.CreateTable("tableTwo", columnDefsTableTwo)
+}
+
+// createTableThree defines tableThree with one STRING key column and one
+// non-key column of each remaining type (INT32, INT64, UINT32, UINT64,
+// BYTES, BOOL) — it exists to exercise every column type.
+// NOTE(review): columns 4-7 are misnamed "colFourTableFour/Five/Six/Seven"
+// (copy-paste); behavior is unaffected but tests may rely on these names.
+func createTableThree(stub *shim.ChaincodeStub) error {
+	var columnDefsTableThree []*shim.ColumnDefinition
+	columnOneTableThreeDef := shim.ColumnDefinition{Name: "colOneTableThree",
+		Type: shim.ColumnDefinition_STRING, Key: true}
+	columnTwoTableThreeDef := shim.ColumnDefinition{Name: "colTwoTableThree",
+		Type: shim.ColumnDefinition_INT32, Key: false}
+	columnThreeTableThreeDef := shim.ColumnDefinition{Name: "colThreeTableThree",
+		Type: shim.ColumnDefinition_INT64, Key: false}
+	columnFourTableThreeDef := shim.ColumnDefinition{Name: "colFourTableFour",
+		Type: shim.ColumnDefinition_UINT32, Key: false}
+	columnFiveTableThreeDef := shim.ColumnDefinition{Name: "colFourTableFive",
+		Type: shim.ColumnDefinition_UINT64, Key: false}
+	columnSixTableThreeDef := shim.ColumnDefinition{Name: "colFourTableSix",
+		Type: shim.ColumnDefinition_BYTES, Key: false}
+	columnSevenTableThreeDef := shim.ColumnDefinition{Name: "colFourTableSeven",
+		Type: shim.ColumnDefinition_BOOL, Key: false}
+	columnDefsTableThree = append(columnDefsTableThree, &columnOneTableThreeDef)
+	columnDefsTableThree = append(columnDefsTableThree, &columnTwoTableThreeDef)
+	columnDefsTableThree = append(columnDefsTableThree, &columnThreeTableThreeDef)
+	columnDefsTableThree = append(columnDefsTableThree, &columnFourTableThreeDef)
+	columnDefsTableThree = append(columnDefsTableThree, &columnFiveTableThreeDef)
+	columnDefsTableThree = append(columnDefsTableThree, &columnSixTableThreeDef)
+	columnDefsTableThree = append(columnDefsTableThree, &columnSevenTableThreeDef)
+	return stub.CreateTable("tableThree", columnDefsTableThree)
+}
+
+// createTableFour defines tableFour with a single STRING key column —
+// the minimal possible table definition.
+func createTableFour(stub *shim.ChaincodeStub) error {
+	var columnDefsTableFour []*shim.ColumnDefinition
+	columnOneTableFourDef := shim.ColumnDefinition{Name: "colOneTableFour",
+		Type: shim.ColumnDefinition_STRING, Key: true}
+	columnDefsTableFour = append(columnDefsTableFour, &columnOneTableFourDef)
+	return stub.CreateTable("tableFour", columnDefsTableFour)
+}
diff --git a/bddtests/chaincode_pb2.py b/bddtests/chaincode_pb2.py
new file mode 100644
index 00000000000..4371f1839b3
--- /dev/null
+++ b/bddtests/chaincode_pb2.py
@@ -0,0 +1,963 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: chaincode.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='chaincode.proto',
+ package='protos',
+ syntax='proto3',
+ serialized_pb=_b('\n\x0f\x63haincode.proto\x12\x06protos\x1a\x1fgoogle/protobuf/timestamp.proto\")\n\x0b\x43haincodeID\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"0\n\x0e\x43haincodeInput\x12\x10\n\x08\x66unction\x18\x01 \x01(\t\x12\x0c\n\x04\x61rgs\x18\x02 \x03(\t\"\xb8\x02\n\rChaincodeSpec\x12(\n\x04type\x18\x01 \x01(\x0e\x32\x1a.protos.ChaincodeSpec.Type\x12(\n\x0b\x63haincodeID\x18\x02 \x01(\x0b\x32\x13.protos.ChaincodeID\x12\'\n\x07\x63torMsg\x18\x03 \x01(\x0b\x32\x16.protos.ChaincodeInput\x12\x0f\n\x07timeout\x18\x04 \x01(\x05\x12\x15\n\rsecureContext\x18\x05 \x01(\t\x12:\n\x14\x63onfidentialityLevel\x18\x06 \x01(\x0e\x32\x1c.protos.ConfidentialityLevel\x12\x10\n\x08metadata\x18\x07 \x01(\x0c\"4\n\x04Type\x12\r\n\tUNDEFINED\x10\x00\x12\n\n\x06GOLANG\x10\x01\x12\x08\n\x04NODE\x10\x02\x12\x07\n\x03\x43\x41R\x10\x03\"\x86\x02\n\x17\x43haincodeDeploymentSpec\x12,\n\rchaincodeSpec\x18\x01 \x01(\x0b\x32\x15.protos.ChaincodeSpec\x12\x31\n\reffectiveDate\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x13\n\x0b\x63odePackage\x18\x03 \x01(\x0c\x12\x45\n\x07\x65xecEnv\x18\x04 \x01(\x0e\x32\x34.protos.ChaincodeDeploymentSpec.ExecutionEnvironment\".\n\x14\x45xecutionEnvironment\x12\n\n\x06\x44OCKER\x10\x00\x12\n\n\x06SYSTEM\x10\x01\"G\n\x17\x43haincodeInvocationSpec\x12,\n\rchaincodeSpec\x18\x01 \x01(\x0b\x32\x15.protos.ChaincodeSpec\"\xbf\x01\n\x18\x43haincodeSecurityContext\x12\x12\n\ncallerCert\x18\x01 \x01(\x0c\x12\x12\n\ncallerSign\x18\x02 \x01(\x0c\x12\x0f\n\x07payload\x18\x03 \x01(\x0c\x12\x0f\n\x07\x62inding\x18\x04 \x01(\x0c\x12\x10\n\x08metadata\x18\x05 \x01(\x0c\x12\x16\n\x0eparentMetadata\x18\x06 \x01(\x0c\x12/\n\x0btxTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xa2\x04\n\x10\x43haincodeMessage\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1d.protos.ChaincodeMessage.Type\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07payload\x18\x03 \x01(\x0c\x12\x0c\n\x04uuid\x18\x04 
\x01(\t\x12\x39\n\x0fsecurityContext\x18\x05 \x01(\x0b\x32 .protos.ChaincodeSecurityContext\"\xd7\x02\n\x04Type\x12\r\n\tUNDEFINED\x10\x00\x12\x0c\n\x08REGISTER\x10\x01\x12\x0e\n\nREGISTERED\x10\x02\x12\x08\n\x04INIT\x10\x03\x12\t\n\x05READY\x10\x04\x12\x0f\n\x0bTRANSACTION\x10\x05\x12\r\n\tCOMPLETED\x10\x06\x12\t\n\x05\x45RROR\x10\x07\x12\r\n\tGET_STATE\x10\x08\x12\r\n\tPUT_STATE\x10\t\x12\r\n\tDEL_STATE\x10\n\x12\x14\n\x10INVOKE_CHAINCODE\x10\x0b\x12\x10\n\x0cINVOKE_QUERY\x10\x0c\x12\x0c\n\x08RESPONSE\x10\r\x12\t\n\x05QUERY\x10\x0e\x12\x13\n\x0fQUERY_COMPLETED\x10\x0f\x12\x0f\n\x0bQUERY_ERROR\x10\x10\x12\x15\n\x11RANGE_QUERY_STATE\x10\x11\x12\x1a\n\x16RANGE_QUERY_STATE_NEXT\x10\x12\x12\x1b\n\x17RANGE_QUERY_STATE_CLOSE\x10\x13\"*\n\x0cPutStateInfo\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\"3\n\x0fRangeQueryState\x12\x10\n\x08startKey\x18\x01 \x01(\t\x12\x0e\n\x06\x65ndKey\x18\x02 \x01(\t\"!\n\x13RangeQueryStateNext\x12\n\n\x02ID\x18\x01 \x01(\t\"\"\n\x14RangeQueryStateClose\x12\n\n\x02ID\x18\x01 \x01(\t\"5\n\x17RangeQueryStateKeyValue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\"n\n\x17RangeQueryStateResponse\x12\x36\n\rkeysAndValues\x18\x01 \x03(\x0b\x32\x1f.protos.RangeQueryStateKeyValue\x12\x0f\n\x07hasMore\x18\x02 \x01(\x08\x12\n\n\x02ID\x18\x03 \x01(\t*4\n\x14\x43onfidentialityLevel\x12\n\n\x06PUBLIC\x10\x00\x12\x10\n\x0c\x43ONFIDENTIAL\x10\x01\x32X\n\x10\x43haincodeSupport\x12\x44\n\x08Register\x12\x18.protos.ChaincodeMessage\x1a\x18.protos.ChaincodeMessage\"\x00(\x01\x30\x01\x62\x06proto3')
+ ,
+ dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+_CONFIDENTIALITYLEVEL = _descriptor.EnumDescriptor(
+ name='ConfidentialityLevel',
+ full_name='protos.ConfidentialityLevel',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='PUBLIC', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='CONFIDENTIAL', index=1, number=1,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=1884,
+ serialized_end=1936,
+)
+_sym_db.RegisterEnumDescriptor(_CONFIDENTIALITYLEVEL)
+
+ConfidentialityLevel = enum_type_wrapper.EnumTypeWrapper(_CONFIDENTIALITYLEVEL)
+PUBLIC = 0
+CONFIDENTIAL = 1
+
+
+_CHAINCODESPEC_TYPE = _descriptor.EnumDescriptor(
+ name='Type',
+ full_name='protos.ChaincodeSpec.Type',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='UNDEFINED', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='GOLANG', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='NODE', index=2, number=2,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='CAR', index=3, number=3,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=414,
+ serialized_end=466,
+)
+_sym_db.RegisterEnumDescriptor(_CHAINCODESPEC_TYPE)
+
+_CHAINCODEDEPLOYMENTSPEC_EXECUTIONENVIRONMENT = _descriptor.EnumDescriptor(
+ name='ExecutionEnvironment',
+ full_name='protos.ChaincodeDeploymentSpec.ExecutionEnvironment',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='DOCKER', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SYSTEM', index=1, number=1,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=685,
+ serialized_end=731,
+)
+_sym_db.RegisterEnumDescriptor(_CHAINCODEDEPLOYMENTSPEC_EXECUTIONENVIRONMENT)
+
+_CHAINCODEMESSAGE_TYPE = _descriptor.EnumDescriptor(
+ name='Type',
+ full_name='protos.ChaincodeMessage.Type',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='UNDEFINED', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='REGISTER', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='REGISTERED', index=2, number=2,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='INIT', index=3, number=3,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='READY', index=4, number=4,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='TRANSACTION', index=5, number=5,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='COMPLETED', index=6, number=6,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='ERROR', index=7, number=7,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='GET_STATE', index=8, number=8,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='PUT_STATE', index=9, number=9,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='DEL_STATE', index=10, number=10,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='INVOKE_CHAINCODE', index=11, number=11,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='INVOKE_QUERY', index=12, number=12,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='RESPONSE', index=13, number=13,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='QUERY', index=14, number=14,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='QUERY_COMPLETED', index=15, number=15,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='QUERY_ERROR', index=16, number=16,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='RANGE_QUERY_STATE', index=17, number=17,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='RANGE_QUERY_STATE_NEXT', index=18, number=18,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='RANGE_QUERY_STATE_CLOSE', index=19, number=19,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=1204,
+ serialized_end=1547,
+)
+_sym_db.RegisterEnumDescriptor(_CHAINCODEMESSAGE_TYPE)
+
+
+_CHAINCODEID = _descriptor.Descriptor(
+ name='ChaincodeID',
+ full_name='protos.ChaincodeID',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='path', full_name='protos.ChaincodeID.path', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='name', full_name='protos.ChaincodeID.name', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=60,
+ serialized_end=101,
+)
+
+
+_CHAINCODEINPUT = _descriptor.Descriptor(
+ name='ChaincodeInput',
+ full_name='protos.ChaincodeInput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='function', full_name='protos.ChaincodeInput.function', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='args', full_name='protos.ChaincodeInput.args', index=1,
+ number=2, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=103,
+ serialized_end=151,
+)
+
+
+_CHAINCODESPEC = _descriptor.Descriptor(
+ name='ChaincodeSpec',
+ full_name='protos.ChaincodeSpec',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='type', full_name='protos.ChaincodeSpec.type', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='chaincodeID', full_name='protos.ChaincodeSpec.chaincodeID', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='ctorMsg', full_name='protos.ChaincodeSpec.ctorMsg', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='timeout', full_name='protos.ChaincodeSpec.timeout', index=3,
+ number=4, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='secureContext', full_name='protos.ChaincodeSpec.secureContext', index=4,
+ number=5, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='confidentialityLevel', full_name='protos.ChaincodeSpec.confidentialityLevel', index=5,
+ number=6, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='metadata', full_name='protos.ChaincodeSpec.metadata', index=6,
+ number=7, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ _CHAINCODESPEC_TYPE,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=154,
+ serialized_end=466,
+)
+
+
+_CHAINCODEDEPLOYMENTSPEC = _descriptor.Descriptor(
+ name='ChaincodeDeploymentSpec',
+ full_name='protos.ChaincodeDeploymentSpec',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='chaincodeSpec', full_name='protos.ChaincodeDeploymentSpec.chaincodeSpec', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='effectiveDate', full_name='protos.ChaincodeDeploymentSpec.effectiveDate', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='codePackage', full_name='protos.ChaincodeDeploymentSpec.codePackage', index=2,
+ number=3, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='execEnv', full_name='protos.ChaincodeDeploymentSpec.execEnv', index=3,
+ number=4, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ _CHAINCODEDEPLOYMENTSPEC_EXECUTIONENVIRONMENT,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=469,
+ serialized_end=731,
+)
+
+
+_CHAINCODEINVOCATIONSPEC = _descriptor.Descriptor(
+ name='ChaincodeInvocationSpec',
+ full_name='protos.ChaincodeInvocationSpec',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='chaincodeSpec', full_name='protos.ChaincodeInvocationSpec.chaincodeSpec', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=733,
+ serialized_end=804,
+)
+
+
+_CHAINCODESECURITYCONTEXT = _descriptor.Descriptor(
+ name='ChaincodeSecurityContext',
+ full_name='protos.ChaincodeSecurityContext',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='callerCert', full_name='protos.ChaincodeSecurityContext.callerCert', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='callerSign', full_name='protos.ChaincodeSecurityContext.callerSign', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='payload', full_name='protos.ChaincodeSecurityContext.payload', index=2,
+ number=3, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='binding', full_name='protos.ChaincodeSecurityContext.binding', index=3,
+ number=4, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='metadata', full_name='protos.ChaincodeSecurityContext.metadata', index=4,
+ number=5, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='parentMetadata', full_name='protos.ChaincodeSecurityContext.parentMetadata', index=5,
+ number=6, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='txTimestamp', full_name='protos.ChaincodeSecurityContext.txTimestamp', index=6,
+ number=7, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=807,
+ serialized_end=998,
+)
+
+
+_CHAINCODEMESSAGE = _descriptor.Descriptor(
+ name='ChaincodeMessage',
+ full_name='protos.ChaincodeMessage',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='type', full_name='protos.ChaincodeMessage.type', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='timestamp', full_name='protos.ChaincodeMessage.timestamp', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='payload', full_name='protos.ChaincodeMessage.payload', index=2,
+ number=3, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='uuid', full_name='protos.ChaincodeMessage.uuid', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='securityContext', full_name='protos.ChaincodeMessage.securityContext', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ _CHAINCODEMESSAGE_TYPE,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1001,
+ serialized_end=1547,
+)
+
+
+_PUTSTATEINFO = _descriptor.Descriptor(
+ name='PutStateInfo',
+ full_name='protos.PutStateInfo',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='key', full_name='protos.PutStateInfo.key', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='protos.PutStateInfo.value', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1549,
+ serialized_end=1591,
+)
+
+
+_RANGEQUERYSTATE = _descriptor.Descriptor(
+ name='RangeQueryState',
+ full_name='protos.RangeQueryState',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='startKey', full_name='protos.RangeQueryState.startKey', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='endKey', full_name='protos.RangeQueryState.endKey', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1593,
+ serialized_end=1644,
+)
+
+
+_RANGEQUERYSTATENEXT = _descriptor.Descriptor(
+ name='RangeQueryStateNext',
+ full_name='protos.RangeQueryStateNext',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='ID', full_name='protos.RangeQueryStateNext.ID', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1646,
+ serialized_end=1679,
+)
+
+
+_RANGEQUERYSTATECLOSE = _descriptor.Descriptor(
+ name='RangeQueryStateClose',
+ full_name='protos.RangeQueryStateClose',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='ID', full_name='protos.RangeQueryStateClose.ID', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1681,
+ serialized_end=1715,
+)
+
+
+_RANGEQUERYSTATEKEYVALUE = _descriptor.Descriptor(
+ name='RangeQueryStateKeyValue',
+ full_name='protos.RangeQueryStateKeyValue',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='key', full_name='protos.RangeQueryStateKeyValue.key', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='protos.RangeQueryStateKeyValue.value', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1717,
+ serialized_end=1770,
+)
+
+
+_RANGEQUERYSTATERESPONSE = _descriptor.Descriptor(
+ name='RangeQueryStateResponse',
+ full_name='protos.RangeQueryStateResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='keysAndValues', full_name='protos.RangeQueryStateResponse.keysAndValues', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='hasMore', full_name='protos.RangeQueryStateResponse.hasMore', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='ID', full_name='protos.RangeQueryStateResponse.ID', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1772,
+ serialized_end=1882,
+)
+
+_CHAINCODESPEC.fields_by_name['type'].enum_type = _CHAINCODESPEC_TYPE
+_CHAINCODESPEC.fields_by_name['chaincodeID'].message_type = _CHAINCODEID
+_CHAINCODESPEC.fields_by_name['ctorMsg'].message_type = _CHAINCODEINPUT
+_CHAINCODESPEC.fields_by_name['confidentialityLevel'].enum_type = _CONFIDENTIALITYLEVEL
+_CHAINCODESPEC_TYPE.containing_type = _CHAINCODESPEC
+_CHAINCODEDEPLOYMENTSPEC.fields_by_name['chaincodeSpec'].message_type = _CHAINCODESPEC
+_CHAINCODEDEPLOYMENTSPEC.fields_by_name['effectiveDate'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_CHAINCODEDEPLOYMENTSPEC.fields_by_name['execEnv'].enum_type = _CHAINCODEDEPLOYMENTSPEC_EXECUTIONENVIRONMENT
+_CHAINCODEDEPLOYMENTSPEC_EXECUTIONENVIRONMENT.containing_type = _CHAINCODEDEPLOYMENTSPEC
+_CHAINCODEINVOCATIONSPEC.fields_by_name['chaincodeSpec'].message_type = _CHAINCODESPEC
+_CHAINCODESECURITYCONTEXT.fields_by_name['txTimestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_CHAINCODEMESSAGE.fields_by_name['type'].enum_type = _CHAINCODEMESSAGE_TYPE
+_CHAINCODEMESSAGE.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_CHAINCODEMESSAGE.fields_by_name['securityContext'].message_type = _CHAINCODESECURITYCONTEXT
+_CHAINCODEMESSAGE_TYPE.containing_type = _CHAINCODEMESSAGE
+_RANGEQUERYSTATERESPONSE.fields_by_name['keysAndValues'].message_type = _RANGEQUERYSTATEKEYVALUE
+DESCRIPTOR.message_types_by_name['ChaincodeID'] = _CHAINCODEID
+DESCRIPTOR.message_types_by_name['ChaincodeInput'] = _CHAINCODEINPUT
+DESCRIPTOR.message_types_by_name['ChaincodeSpec'] = _CHAINCODESPEC
+DESCRIPTOR.message_types_by_name['ChaincodeDeploymentSpec'] = _CHAINCODEDEPLOYMENTSPEC
+DESCRIPTOR.message_types_by_name['ChaincodeInvocationSpec'] = _CHAINCODEINVOCATIONSPEC
+DESCRIPTOR.message_types_by_name['ChaincodeSecurityContext'] = _CHAINCODESECURITYCONTEXT
+DESCRIPTOR.message_types_by_name['ChaincodeMessage'] = _CHAINCODEMESSAGE
+DESCRIPTOR.message_types_by_name['PutStateInfo'] = _PUTSTATEINFO
+DESCRIPTOR.message_types_by_name['RangeQueryState'] = _RANGEQUERYSTATE
+DESCRIPTOR.message_types_by_name['RangeQueryStateNext'] = _RANGEQUERYSTATENEXT
+DESCRIPTOR.message_types_by_name['RangeQueryStateClose'] = _RANGEQUERYSTATECLOSE
+DESCRIPTOR.message_types_by_name['RangeQueryStateKeyValue'] = _RANGEQUERYSTATEKEYVALUE
+DESCRIPTOR.message_types_by_name['RangeQueryStateResponse'] = _RANGEQUERYSTATERESPONSE
+DESCRIPTOR.enum_types_by_name['ConfidentialityLevel'] = _CONFIDENTIALITYLEVEL
+
+ChaincodeID = _reflection.GeneratedProtocolMessageType('ChaincodeID', (_message.Message,), dict(
+ DESCRIPTOR = _CHAINCODEID,
+ __module__ = 'chaincode_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ChaincodeID)
+ ))
+_sym_db.RegisterMessage(ChaincodeID)
+
+ChaincodeInput = _reflection.GeneratedProtocolMessageType('ChaincodeInput', (_message.Message,), dict(
+ DESCRIPTOR = _CHAINCODEINPUT,
+ __module__ = 'chaincode_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ChaincodeInput)
+ ))
+_sym_db.RegisterMessage(ChaincodeInput)
+
+ChaincodeSpec = _reflection.GeneratedProtocolMessageType('ChaincodeSpec', (_message.Message,), dict(
+ DESCRIPTOR = _CHAINCODESPEC,
+ __module__ = 'chaincode_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ChaincodeSpec)
+ ))
+_sym_db.RegisterMessage(ChaincodeSpec)
+
+ChaincodeDeploymentSpec = _reflection.GeneratedProtocolMessageType('ChaincodeDeploymentSpec', (_message.Message,), dict(
+ DESCRIPTOR = _CHAINCODEDEPLOYMENTSPEC,
+ __module__ = 'chaincode_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ChaincodeDeploymentSpec)
+ ))
+_sym_db.RegisterMessage(ChaincodeDeploymentSpec)
+
+ChaincodeInvocationSpec = _reflection.GeneratedProtocolMessageType('ChaincodeInvocationSpec', (_message.Message,), dict(
+ DESCRIPTOR = _CHAINCODEINVOCATIONSPEC,
+ __module__ = 'chaincode_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ChaincodeInvocationSpec)
+ ))
+_sym_db.RegisterMessage(ChaincodeInvocationSpec)
+
+ChaincodeSecurityContext = _reflection.GeneratedProtocolMessageType('ChaincodeSecurityContext', (_message.Message,), dict(
+ DESCRIPTOR = _CHAINCODESECURITYCONTEXT,
+ __module__ = 'chaincode_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ChaincodeSecurityContext)
+ ))
+_sym_db.RegisterMessage(ChaincodeSecurityContext)
+
+ChaincodeMessage = _reflection.GeneratedProtocolMessageType('ChaincodeMessage', (_message.Message,), dict(
+ DESCRIPTOR = _CHAINCODEMESSAGE,
+ __module__ = 'chaincode_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ChaincodeMessage)
+ ))
+_sym_db.RegisterMessage(ChaincodeMessage)
+
+PutStateInfo = _reflection.GeneratedProtocolMessageType('PutStateInfo', (_message.Message,), dict(
+ DESCRIPTOR = _PUTSTATEINFO,
+ __module__ = 'chaincode_pb2'
+ # @@protoc_insertion_point(class_scope:protos.PutStateInfo)
+ ))
+_sym_db.RegisterMessage(PutStateInfo)
+
+RangeQueryState = _reflection.GeneratedProtocolMessageType('RangeQueryState', (_message.Message,), dict(
+ DESCRIPTOR = _RANGEQUERYSTATE,
+ __module__ = 'chaincode_pb2'
+ # @@protoc_insertion_point(class_scope:protos.RangeQueryState)
+ ))
+_sym_db.RegisterMessage(RangeQueryState)
+
+RangeQueryStateNext = _reflection.GeneratedProtocolMessageType('RangeQueryStateNext', (_message.Message,), dict(
+ DESCRIPTOR = _RANGEQUERYSTATENEXT,
+ __module__ = 'chaincode_pb2'
+ # @@protoc_insertion_point(class_scope:protos.RangeQueryStateNext)
+ ))
+_sym_db.RegisterMessage(RangeQueryStateNext)
+
+RangeQueryStateClose = _reflection.GeneratedProtocolMessageType('RangeQueryStateClose', (_message.Message,), dict(
+ DESCRIPTOR = _RANGEQUERYSTATECLOSE,
+ __module__ = 'chaincode_pb2'
+ # @@protoc_insertion_point(class_scope:protos.RangeQueryStateClose)
+ ))
+_sym_db.RegisterMessage(RangeQueryStateClose)
+
+RangeQueryStateKeyValue = _reflection.GeneratedProtocolMessageType('RangeQueryStateKeyValue', (_message.Message,), dict(
+ DESCRIPTOR = _RANGEQUERYSTATEKEYVALUE,
+ __module__ = 'chaincode_pb2'
+ # @@protoc_insertion_point(class_scope:protos.RangeQueryStateKeyValue)
+ ))
+_sym_db.RegisterMessage(RangeQueryStateKeyValue)
+
+RangeQueryStateResponse = _reflection.GeneratedProtocolMessageType('RangeQueryStateResponse', (_message.Message,), dict(
+ DESCRIPTOR = _RANGEQUERYSTATERESPONSE,
+ __module__ = 'chaincode_pb2'
+ # @@protoc_insertion_point(class_scope:protos.RangeQueryStateResponse)
+ ))
+_sym_db.RegisterMessage(RangeQueryStateResponse)
+
+
+import abc
+import six
+from grpc.beta import implementations as beta_implementations
+from grpc.beta import interfaces as beta_interfaces
+from grpc.framework.common import cardinality
+from grpc.framework.interfaces.face import utilities as face_utilities
+
+class BetaChaincodeSupportServicer(object):
+ """Interface that provides support to chaincode execution. ChaincodeContext
+ provides the context necessary for the server to respond appropriately.
+ """
+ def Register(self, request_iterator, context):
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+
+class BetaChaincodeSupportStub(object):
+ """Interface that provides support to chaincode execution. ChaincodeContext
+ provides the context necessary for the server to respond appropriately.
+ """
+ def Register(self, request_iterator, timeout):
+ raise NotImplementedError()
+
+def beta_create_ChaincodeSupport_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+ import chaincode_pb2
+ import chaincode_pb2
+ request_deserializers = {
+ ('protos.ChaincodeSupport', 'Register'): chaincode_pb2.ChaincodeMessage.FromString,
+ }
+ response_serializers = {
+ ('protos.ChaincodeSupport', 'Register'): chaincode_pb2.ChaincodeMessage.SerializeToString,
+ }
+ method_implementations = {
+ ('protos.ChaincodeSupport', 'Register'): face_utilities.stream_stream_inline(servicer.Register),
+ }
+ server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+ return beta_implementations.server(method_implementations, options=server_options)
+
+def beta_create_ChaincodeSupport_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+ import chaincode_pb2
+ import chaincode_pb2
+ request_serializers = {
+ ('protos.ChaincodeSupport', 'Register'): chaincode_pb2.ChaincodeMessage.SerializeToString,
+ }
+ response_deserializers = {
+ ('protos.ChaincodeSupport', 'Register'): chaincode_pb2.ChaincodeMessage.FromString,
+ }
+ cardinalities = {
+ 'Register': cardinality.Cardinality.STREAM_STREAM,
+ }
+ stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+ return beta_implementations.dynamic_stub(channel, 'protos.ChaincodeSupport', cardinalities, options=stub_options)
+# @@protoc_insertion_point(module_scope)
diff --git a/bddtests/chaincode_rbac.feature b/bddtests/chaincode_rbac.feature
new file mode 100644
index 00000000000..fd8f1c1ac70
--- /dev/null
+++ b/bddtests/chaincode_rbac.feature
@@ -0,0 +1,229 @@
+#
+# Test Hyperledger Chaincodes using various RBAC mechanisms
+#
+# Tags that can be used and will affect test internals:
+#
+# @doNotDecompose will NOT decompose the named compose_yaml after scenario ends. Useful for setting up environment and reviewing after scenario.
+#
+
+Feature: Role Based Access Control (RBAC)
+ As a HyperLedger developer
+ I want various mechanisms available for implementing RBAC within Chaincode
+
+ #@doNotDecompose
+ @issue_1207
+ Scenario Outline: test a chaincode showing how to implement role-based access control using TCerts with no attributes
+
+    Given we compose "<ComposeFile>"
+ And I wait "5" seconds
+ And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
+ | vp0 |
+ And I register with CA supplying username "alice" and secret "CMS10pEQlB16" on peers:
+ | vp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ #Acquire a Application-TCert for binh (NOTE: application TCert is NEVER used for signing fabric TXs)
+ When user "binhn" requests a new application TCert
+ # Binhn is assuming the application ROLE of Admin by using his own TCert
+ And user "binhn" stores their last result as "TCERT_APP_ADMIN"
+
+    # Deploy, in this case Binh is assigning himself as the Admin for the RBAC chaincode.
+ When user "binhn" sets metadata to their stored value "TCERT_APP_ADMIN"
+ And user "binhn" deploys chaincode "github.com/hyperledger/fabric/examples/chaincode/go/rbac_tcerts_no_attrs" aliased as "rbac_tcerts_no_attrs" with ctor "init" and args
+ ||
+ ||
+ Then I should have received a chaincode name
+    Then I wait up to "<WaitTime>" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ # When we refer to any party, we actually mean a specific application TCert for that party
+
+ #Acquire a Application-TCert for alice, and supplies to binhn (NOTE: application TCert is NEVER used for signing fabric TXs)
+ When user "alice" requests a new application TCert
+ And user "alice" stores their last result as "TCERT_APP_ALICE_1"
+ # Alice gives binhn her application TCert (usually OUT-OF-BAND)
+ And user "alice" gives stored value "TCERT_APP_ALICE_1" to "binhn"
+
+
+ # binhn is assigning the role of 'writer' to alice
+ When "binhn" uses application TCert "TCERT_APP_ADMIN" to assign role "writer" to application TCert "TCERT_APP_ALICE_1"
+ Then I should have received a transactionID
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ And "binhn"'s last transaction should have succeeded
+
+
+ # Alice attempts to assign a role to binh, but this will fail as she does NOT have permission.
+ When "alice" uses application TCert "TCERT_APP_ALICE_1" to assign role "writer" to application TCert "TCERT_APP_ALICE_1"
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ And "alice"'s last transaction should have failed with message that contains "The invoker does not have the required roles"
+
+ #When "Alice" writes value "Alice's value"
+ #Then invoke should succeed
+
+ #When "Bob" reads value
+ #Then the result should be "Alice's value"
+
+ #When "Alice" reads value
+    #Then should fail with failure "Permission denied"
+
+
+ # TODO: Must manage TCert expiration for all parties involved.
+
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-batch.yml | 120 |
+
+
+ #@doNotDecompose
+ @issue_RBAC_TCERT_With_Attributes
+ Scenario Outline: test a chaincode showing how to implement role-based access control using TCerts with attributes
+
+    Given we compose "<ComposeFile>"
+ And I wait "5" seconds
+ And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
+ | vp0 |
+ And I register with CA supplying username "alice" and secret "8Y7WIrLX0A8G" on peers:
+ | vp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ #Acquire a batch of TCerts for binh and store the TCertOwner key material associated with the batch. PreK0 which is per TCert.
+ When user "binhn" requests a batch of TCerts of size "1" with attributes:
+ | role |
+ | admin |
+ And user "binhn" stores their last result as "BATCH_OF_TCERTS"
+ #
+
+    # Deploy, in this case Binh is assigning himself as the Admin for the RBAC chaincode.
+ And user "binhn" deploys chaincode "github.com/hyperledger/fabric/examples/chaincode/go/rbac_tcerts_with_attrs" with ctor "init" to "vp0"
+ ||
+ ||
+ Then I should have received a chaincode name
+    Then I wait up to "<WaitTime>" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ # When we refer to any party, we actually mean a specific application TCert for that party
+
+ #Acquire a Application-TCert for alice, and supplies to binhn (NOTE: application TCert is NEVER used for signing fabric TXs)
+ When user "alice" requests a new application TCert
+ And user "alice" stores their last result as "TCERT_APP_ALICE_1"
+ # Alice gives binhn her application TCert (usually OUT-OF-BAND)
+ And user "alice" gives stored value "TCERT_APP_ALICE_1" to "binhn"
+
+
+ # binhn is assigning the role of 'writer' to alice
+ When "binhn" uses application TCert "TCERT_APP_ADMIN" to assign role "writer" to application TCert "TCERT_APP_ALICE_1"
+ Then I should have received a transactionID
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+
+ # Alice attempts to assign a role to binh, but this will fail as she does NOT have permission. The check is currently
+ # to make sure the TX does NOT appear on the chain.
+ When "alice" uses application TCert "TCERT_APP_ALICE_1" to assign role "writer" to application TCert "TCERT_APP_ALICE_1"
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ And transaction should have failed with message "Permission denied"
+
+
+ #When "Administrator" assigns role "reader" to "Bob"
+ #Then invoke should succeed
+
+ #When "Bob" assigns role "reader" to "Bob"
+ #Then invoke should fail with failure "Permissed denied"
+
+ #When "Alice" writes value "Alice's value"
+ #Then invoke should succeed
+
+ #When "Bob" reads value
+ #Then the result should be "Alice's value"
+
+ #When "Alice" reads value
+    #Then should fail with failure "Permission denied"
+
+
+ # TODO: Must manage TCert expiration for all parties involved.
+
+# When I invoke chaincode "rbac_tcerts_no_attrs" function name "addRole" on "vp0"
+# |arg1|arg2|arg3|
+# | a | b | 20 |
+# Then I should have received a transactionID
+# Then I wait up to "10" seconds for transaction to be committed to peers:
+# | vp0 | vp1 | vp2 | vp3 |
+
+
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-batch.yml | 60 |
+
+
+ #@doNotDecompose
+ @issue_1565
+ Scenario Outline: test chaincode to chaincode invocation
+
+    Given we compose "<ComposeFile>"
+ And I wait "5" seconds
+ And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
+ | vp0 |
+ And I register with CA supplying username "alice" and secret "CMS10pEQlB16" on peers:
+ | vp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ # Deploy the first chaincode
+ When user "binhn" deploys chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" aliased as "chaincode_example02" with ctor "init" and args
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+    Then I wait up to "<WaitTime>" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ And "binhn"'s last transaction should have succeeded
+
+ # Deploy the second chaincode
+ When user "binhn" deploys chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example05" aliased as "chaincode_example05" with ctor "init" and args
+ | arg1 | arg2 |
+ | sum | 0 |
+ Then I should have received a chaincode name
+    Then I wait up to "<WaitTime>" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ And "binhn"'s last transaction should have succeeded
+
+
+ # Invoke chaincode_example05 which in turn will invoke chaincode_example02. NOTE: Binhn must pass a reference to the first chaincode
+ Given user "binhn" stores a reference to chaincode "chaincode_example02" as "cc1"
+ When user "binhn" invokes chaincode "chaincode_example05" function name "invoke" with args
+ | arg1 | arg2 |
+ | {cc1} | sum |
+ Then I should have received a transactionID
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ And "binhn"'s last transaction should have succeeded
+
+
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-batch.yml | 60 |
diff --git a/bddtests/compose-defaults.yml b/bddtests/compose-defaults.yml
new file mode 100644
index 00000000000..d8631dfa2d3
--- /dev/null
+++ b/bddtests/compose-defaults.yml
@@ -0,0 +1,17 @@
+vp:
+ image: hyperledger/fabric-peer
+ environment:
+ - CORE_PEER_ADDRESSAUTODETECT=true
+ - CORE_VM_ENDPOINT=http://172.17.0.1:2375
+ # TODO: This is currently required due to BUG in variant logic based upon log level.
+ - CORE_LOGGING_LEVEL=DEBUG
+ # Startup of peer must be delayed to allow membersrvc to come up first
+ command: sh -c "sleep 5; peer node start"
+ #command: peer node start
+
+ # Use these options if coverage desired for peers
+ #image: hyperledger/fabric-peer-coverage
+ #command: ./peer.test --test.coverprofile=coverage.cov node start
+membersrvc:
+ image: hyperledger/fabric-membersrvc
+ command: membersrvc
diff --git a/bddtests/devops_pb2.py b/bddtests/devops_pb2.py
new file mode 100644
index 00000000000..5318489b35e
--- /dev/null
+++ b/bddtests/devops_pb2.py
@@ -0,0 +1,581 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: devops.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+import chaincode_pb2 as chaincode__pb2
+import fabric_pb2 as fabric__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='devops.proto',
+ package='protos',
+ syntax='proto3',
+ serialized_pb=_b('\n\x0c\x64\x65vops.proto\x12\x06protos\x1a\x0f\x63haincode.proto\x1a\x0c\x66\x61\x62ric.proto\"0\n\x06Secret\x12\x10\n\x08\x65nrollId\x18\x01 \x01(\t\x12\x14\n\x0c\x65nrollSecret\x18\x02 \x01(\t\"L\n\nSigmaInput\x12\x1e\n\x06secret\x18\x01 \x01(\x0b\x32\x0e.protos.Secret\x12\x10\n\x08\x61ppTCert\x18\x02 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\"g\n\x12\x45xecuteWithBinding\x12@\n\x17\x63haincodeInvocationSpec\x18\x01 \x01(\x0b\x32\x1f.protos.ChaincodeInvocationSpec\x12\x0f\n\x07\x62inding\x18\x02 \x01(\x0c\"A\n\x0bSigmaOutput\x12\r\n\x05tcert\x18\x01 \x01(\x0c\x12\r\n\x05sigma\x18\x02 \x01(\x0c\x12\x14\n\x0c\x61sn1Encoding\x18\x03 \x01(\x0c\"\xba\x01\n\x0b\x42uildResult\x12.\n\x06status\x18\x01 \x01(\x0e\x32\x1e.protos.BuildResult.StatusCode\x12\x0b\n\x03msg\x18\x02 \x01(\t\x12\x37\n\x0e\x64\x65ploymentSpec\x18\x03 \x01(\x0b\x32\x1f.protos.ChaincodeDeploymentSpec\"5\n\nStatusCode\x12\r\n\tUNDEFINED\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\x0b\n\x07\x46\x41ILURE\x10\x02\"-\n\x12TransactionRequest\x12\x17\n\x0ftransactionUuid\x18\x01 
\x01(\t2\xfe\x04\n\x06\x44\x65vops\x12+\n\x05Login\x12\x0e.protos.Secret\x1a\x10.protos.Response\"\x00\x12\x41\n\x05\x42uild\x12\x15.protos.ChaincodeSpec\x1a\x1f.protos.ChaincodeDeploymentSpec\"\x00\x12\x42\n\x06\x44\x65ploy\x12\x15.protos.ChaincodeSpec\x1a\x1f.protos.ChaincodeDeploymentSpec\"\x00\x12=\n\x06Invoke\x12\x1f.protos.ChaincodeInvocationSpec\x1a\x10.protos.Response\"\x00\x12<\n\x05Query\x12\x1f.protos.ChaincodeInvocationSpec\x1a\x10.protos.Response\"\x00\x12\x46\n\x14GetTransactionResult\x12\x1a.protos.TransactionRequest\x1a\x10.protos.Response\"\x00\x12=\n\x17\x45XP_GetApplicationTCert\x12\x0e.protos.Secret\x1a\x10.protos.Response\"\x00\x12\x36\n\x10\x45XP_PrepareForTx\x12\x0e.protos.Secret\x1a\x10.protos.Response\"\x00\x12:\n\x10\x45XP_ProduceSigma\x12\x12.protos.SigmaInput\x1a\x10.protos.Response\"\x00\x12H\n\x16\x45XP_ExecuteWithBinding\x12\x1a.protos.ExecuteWithBinding\x1a\x10.protos.Response\"\x00\x62\x06proto3')
+ ,
+ dependencies=[chaincode__pb2.DESCRIPTOR,fabric__pb2.DESCRIPTOR,])
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+
+_BUILDRESULT_STATUSCODE = _descriptor.EnumDescriptor(
+ name='StatusCode',
+ full_name='protos.BuildResult.StatusCode',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='UNDEFINED', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SUCCESS', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='FAILURE', index=2, number=2,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=489,
+ serialized_end=542,
+)
+_sym_db.RegisterEnumDescriptor(_BUILDRESULT_STATUSCODE)
+
+
+_SECRET = _descriptor.Descriptor(
+ name='Secret',
+ full_name='protos.Secret',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='enrollId', full_name='protos.Secret.enrollId', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='enrollSecret', full_name='protos.Secret.enrollSecret', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=55,
+ serialized_end=103,
+)
+
+
+_SIGMAINPUT = _descriptor.Descriptor(
+ name='SigmaInput',
+ full_name='protos.SigmaInput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='secret', full_name='protos.SigmaInput.secret', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='appTCert', full_name='protos.SigmaInput.appTCert', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='data', full_name='protos.SigmaInput.data', index=2,
+ number=3, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=105,
+ serialized_end=181,
+)
+
+
+_EXECUTEWITHBINDING = _descriptor.Descriptor(
+ name='ExecuteWithBinding',
+ full_name='protos.ExecuteWithBinding',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='chaincodeInvocationSpec', full_name='protos.ExecuteWithBinding.chaincodeInvocationSpec', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='binding', full_name='protos.ExecuteWithBinding.binding', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=183,
+ serialized_end=286,
+)
+
+
+_SIGMAOUTPUT = _descriptor.Descriptor(
+ name='SigmaOutput',
+ full_name='protos.SigmaOutput',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='tcert', full_name='protos.SigmaOutput.tcert', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sigma', full_name='protos.SigmaOutput.sigma', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='asn1Encoding', full_name='protos.SigmaOutput.asn1Encoding', index=2,
+ number=3, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=288,
+ serialized_end=353,
+)
+
+
+_BUILDRESULT = _descriptor.Descriptor(
+ name='BuildResult',
+ full_name='protos.BuildResult',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='status', full_name='protos.BuildResult.status', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='msg', full_name='protos.BuildResult.msg', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='deploymentSpec', full_name='protos.BuildResult.deploymentSpec', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ _BUILDRESULT_STATUSCODE,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=356,
+ serialized_end=542,
+)
+
+
+_TRANSACTIONREQUEST = _descriptor.Descriptor(
+ name='TransactionRequest',
+ full_name='protos.TransactionRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='transactionUuid', full_name='protos.TransactionRequest.transactionUuid', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=544,
+ serialized_end=589,
+)
+
+_SIGMAINPUT.fields_by_name['secret'].message_type = _SECRET
+_EXECUTEWITHBINDING.fields_by_name['chaincodeInvocationSpec'].message_type = chaincode__pb2._CHAINCODEINVOCATIONSPEC
+_BUILDRESULT.fields_by_name['status'].enum_type = _BUILDRESULT_STATUSCODE
+_BUILDRESULT.fields_by_name['deploymentSpec'].message_type = chaincode__pb2._CHAINCODEDEPLOYMENTSPEC
+_BUILDRESULT_STATUSCODE.containing_type = _BUILDRESULT
+DESCRIPTOR.message_types_by_name['Secret'] = _SECRET
+DESCRIPTOR.message_types_by_name['SigmaInput'] = _SIGMAINPUT
+DESCRIPTOR.message_types_by_name['ExecuteWithBinding'] = _EXECUTEWITHBINDING
+DESCRIPTOR.message_types_by_name['SigmaOutput'] = _SIGMAOUTPUT
+DESCRIPTOR.message_types_by_name['BuildResult'] = _BUILDRESULT
+DESCRIPTOR.message_types_by_name['TransactionRequest'] = _TRANSACTIONREQUEST
+
+Secret = _reflection.GeneratedProtocolMessageType('Secret', (_message.Message,), dict(
+ DESCRIPTOR = _SECRET,
+ __module__ = 'devops_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Secret)
+ ))
+_sym_db.RegisterMessage(Secret)
+
+SigmaInput = _reflection.GeneratedProtocolMessageType('SigmaInput', (_message.Message,), dict(
+ DESCRIPTOR = _SIGMAINPUT,
+ __module__ = 'devops_pb2'
+ # @@protoc_insertion_point(class_scope:protos.SigmaInput)
+ ))
+_sym_db.RegisterMessage(SigmaInput)
+
+ExecuteWithBinding = _reflection.GeneratedProtocolMessageType('ExecuteWithBinding', (_message.Message,), dict(
+ DESCRIPTOR = _EXECUTEWITHBINDING,
+ __module__ = 'devops_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ExecuteWithBinding)
+ ))
+_sym_db.RegisterMessage(ExecuteWithBinding)
+
+SigmaOutput = _reflection.GeneratedProtocolMessageType('SigmaOutput', (_message.Message,), dict(
+ DESCRIPTOR = _SIGMAOUTPUT,
+ __module__ = 'devops_pb2'
+ # @@protoc_insertion_point(class_scope:protos.SigmaOutput)
+ ))
+_sym_db.RegisterMessage(SigmaOutput)
+
+BuildResult = _reflection.GeneratedProtocolMessageType('BuildResult', (_message.Message,), dict(
+ DESCRIPTOR = _BUILDRESULT,
+ __module__ = 'devops_pb2'
+ # @@protoc_insertion_point(class_scope:protos.BuildResult)
+ ))
+_sym_db.RegisterMessage(BuildResult)
+
+TransactionRequest = _reflection.GeneratedProtocolMessageType('TransactionRequest', (_message.Message,), dict(
+ DESCRIPTOR = _TRANSACTIONREQUEST,
+ __module__ = 'devops_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TransactionRequest)
+ ))
+_sym_db.RegisterMessage(TransactionRequest)
+
+
+import abc
+import six
+from grpc.beta import implementations as beta_implementations
+from grpc.beta import interfaces as beta_interfaces
+from grpc.framework.common import cardinality
+from grpc.framework.interfaces.face import utilities as face_utilities
+
+class BetaDevopsServicer(object):
+ """Interface exported by the server.
+ """
+ def Login(self, request, context):
+ """Log in - passed Secret object and returns Response object, where
+ msg is the security context to be used in subsequent invocations
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def Build(self, request, context):
+ """Build the chaincode package.
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def Deploy(self, request, context):
+ """Deploy the chaincode package to the chain.
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def Invoke(self, request, context):
+ """Invoke chaincode.
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def Query(self, request, context):
+ """Invoke chaincode.
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def GetTransactionResult(self, request, context):
+ """Request a TransactionResult. The Response.Msg will contain the TransactionResult if successfully found the transaction in the chain.
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def EXP_GetApplicationTCert(self, request, context):
+ """Retrieve a TCert.
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def EXP_PrepareForTx(self, request, context):
+ """Prepare for performing a TX, which will return a binding that can later be used to sign and then execute a transaction.
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def EXP_ProduceSigma(self, request, context):
+ """Prepare for performing a TX, which will return a binding that can later be used to sign and then execute a transaction.
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def EXP_ExecuteWithBinding(self, request, context):
+ """Execute a transaction with a specific binding
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+
+class BetaDevopsStub(object):
+ """Interface exported by the server.
+ """
+ def Login(self, request, timeout):
+ """Log in - passed Secret object and returns Response object, where
+ msg is the security context to be used in subsequent invocations
+ """
+ raise NotImplementedError()
+ Login.future = None
+ def Build(self, request, timeout):
+ """Build the chaincode package.
+ """
+ raise NotImplementedError()
+ Build.future = None
+ def Deploy(self, request, timeout):
+ """Deploy the chaincode package to the chain.
+ """
+ raise NotImplementedError()
+ Deploy.future = None
+ def Invoke(self, request, timeout):
+ """Invoke chaincode.
+ """
+ raise NotImplementedError()
+ Invoke.future = None
+ def Query(self, request, timeout):
+ """Invoke chaincode.
+ """
+ raise NotImplementedError()
+ Query.future = None
+ def GetTransactionResult(self, request, timeout):
+ """Request a TransactionResult. The Response.Msg will contain the TransactionResult if successfully found the transaction in the chain.
+ """
+ raise NotImplementedError()
+ GetTransactionResult.future = None
+ def EXP_GetApplicationTCert(self, request, timeout):
+ """Retrieve a TCert.
+ """
+ raise NotImplementedError()
+ EXP_GetApplicationTCert.future = None
+ def EXP_PrepareForTx(self, request, timeout):
+ """Prepare for performing a TX, which will return a binding that can later be used to sign and then execute a transaction.
+ """
+ raise NotImplementedError()
+ EXP_PrepareForTx.future = None
+ def EXP_ProduceSigma(self, request, timeout):
+ """Prepare for performing a TX, which will return a binding that can later be used to sign and then execute a transaction.
+ """
+ raise NotImplementedError()
+ EXP_ProduceSigma.future = None
+ def EXP_ExecuteWithBinding(self, request, timeout):
+ """Execute a transaction with a specific binding
+ """
+ raise NotImplementedError()
+ EXP_ExecuteWithBinding.future = None
+
+def beta_create_Devops_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+ import devops_pb2
+ import fabric_pb2
+ import chaincode_pb2
+ import chaincode_pb2
+ import chaincode_pb2
+ import chaincode_pb2
+ import chaincode_pb2
+ import fabric_pb2
+ import chaincode_pb2
+ import fabric_pb2
+ import devops_pb2
+ import fabric_pb2
+ import devops_pb2
+ import fabric_pb2
+ import devops_pb2
+ import fabric_pb2
+ import devops_pb2
+ import fabric_pb2
+ import devops_pb2
+ import fabric_pb2
+ request_deserializers = {
+ ('protos.Devops', 'Build'): chaincode_pb2.ChaincodeSpec.FromString,
+ ('protos.Devops', 'Deploy'): chaincode_pb2.ChaincodeSpec.FromString,
+ ('protos.Devops', 'EXP_ExecuteWithBinding'): devops_pb2.ExecuteWithBinding.FromString,
+ ('protos.Devops', 'EXP_GetApplicationTCert'): devops_pb2.Secret.FromString,
+ ('protos.Devops', 'EXP_PrepareForTx'): devops_pb2.Secret.FromString,
+ ('protos.Devops', 'EXP_ProduceSigma'): devops_pb2.SigmaInput.FromString,
+ ('protos.Devops', 'GetTransactionResult'): devops_pb2.TransactionRequest.FromString,
+ ('protos.Devops', 'Invoke'): chaincode_pb2.ChaincodeInvocationSpec.FromString,
+ ('protos.Devops', 'Login'): devops_pb2.Secret.FromString,
+ ('protos.Devops', 'Query'): chaincode_pb2.ChaincodeInvocationSpec.FromString,
+ }
+ response_serializers = {
+ ('protos.Devops', 'Build'): chaincode_pb2.ChaincodeDeploymentSpec.SerializeToString,
+ ('protos.Devops', 'Deploy'): chaincode_pb2.ChaincodeDeploymentSpec.SerializeToString,
+ ('protos.Devops', 'EXP_ExecuteWithBinding'): fabric_pb2.Response.SerializeToString,
+ ('protos.Devops', 'EXP_GetApplicationTCert'): fabric_pb2.Response.SerializeToString,
+ ('protos.Devops', 'EXP_PrepareForTx'): fabric_pb2.Response.SerializeToString,
+ ('protos.Devops', 'EXP_ProduceSigma'): fabric_pb2.Response.SerializeToString,
+ ('protos.Devops', 'GetTransactionResult'): fabric_pb2.Response.SerializeToString,
+ ('protos.Devops', 'Invoke'): fabric_pb2.Response.SerializeToString,
+ ('protos.Devops', 'Login'): fabric_pb2.Response.SerializeToString,
+ ('protos.Devops', 'Query'): fabric_pb2.Response.SerializeToString,
+ }
+ method_implementations = {
+ ('protos.Devops', 'Build'): face_utilities.unary_unary_inline(servicer.Build),
+ ('protos.Devops', 'Deploy'): face_utilities.unary_unary_inline(servicer.Deploy),
+ ('protos.Devops', 'EXP_ExecuteWithBinding'): face_utilities.unary_unary_inline(servicer.EXP_ExecuteWithBinding),
+ ('protos.Devops', 'EXP_GetApplicationTCert'): face_utilities.unary_unary_inline(servicer.EXP_GetApplicationTCert),
+ ('protos.Devops', 'EXP_PrepareForTx'): face_utilities.unary_unary_inline(servicer.EXP_PrepareForTx),
+ ('protos.Devops', 'EXP_ProduceSigma'): face_utilities.unary_unary_inline(servicer.EXP_ProduceSigma),
+ ('protos.Devops', 'GetTransactionResult'): face_utilities.unary_unary_inline(servicer.GetTransactionResult),
+ ('protos.Devops', 'Invoke'): face_utilities.unary_unary_inline(servicer.Invoke),
+ ('protos.Devops', 'Login'): face_utilities.unary_unary_inline(servicer.Login),
+ ('protos.Devops', 'Query'): face_utilities.unary_unary_inline(servicer.Query),
+ }
+ server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+ return beta_implementations.server(method_implementations, options=server_options)
+
+def beta_create_Devops_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+ import devops_pb2
+ import fabric_pb2
+ import chaincode_pb2
+ import chaincode_pb2
+ import chaincode_pb2
+ import chaincode_pb2
+ import chaincode_pb2
+ import fabric_pb2
+ import chaincode_pb2
+ import fabric_pb2
+ import devops_pb2
+ import fabric_pb2
+ import devops_pb2
+ import fabric_pb2
+ import devops_pb2
+ import fabric_pb2
+ import devops_pb2
+ import fabric_pb2
+ import devops_pb2
+ import fabric_pb2
+ request_serializers = {
+ ('protos.Devops', 'Build'): chaincode_pb2.ChaincodeSpec.SerializeToString,
+ ('protos.Devops', 'Deploy'): chaincode_pb2.ChaincodeSpec.SerializeToString,
+ ('protos.Devops', 'EXP_ExecuteWithBinding'): devops_pb2.ExecuteWithBinding.SerializeToString,
+ ('protos.Devops', 'EXP_GetApplicationTCert'): devops_pb2.Secret.SerializeToString,
+ ('protos.Devops', 'EXP_PrepareForTx'): devops_pb2.Secret.SerializeToString,
+ ('protos.Devops', 'EXP_ProduceSigma'): devops_pb2.SigmaInput.SerializeToString,
+ ('protos.Devops', 'GetTransactionResult'): devops_pb2.TransactionRequest.SerializeToString,
+ ('protos.Devops', 'Invoke'): chaincode_pb2.ChaincodeInvocationSpec.SerializeToString,
+ ('protos.Devops', 'Login'): devops_pb2.Secret.SerializeToString,
+ ('protos.Devops', 'Query'): chaincode_pb2.ChaincodeInvocationSpec.SerializeToString,
+ }
+ response_deserializers = {
+ ('protos.Devops', 'Build'): chaincode_pb2.ChaincodeDeploymentSpec.FromString,
+ ('protos.Devops', 'Deploy'): chaincode_pb2.ChaincodeDeploymentSpec.FromString,
+ ('protos.Devops', 'EXP_ExecuteWithBinding'): fabric_pb2.Response.FromString,
+ ('protos.Devops', 'EXP_GetApplicationTCert'): fabric_pb2.Response.FromString,
+ ('protos.Devops', 'EXP_PrepareForTx'): fabric_pb2.Response.FromString,
+ ('protos.Devops', 'EXP_ProduceSigma'): fabric_pb2.Response.FromString,
+ ('protos.Devops', 'GetTransactionResult'): fabric_pb2.Response.FromString,
+ ('protos.Devops', 'Invoke'): fabric_pb2.Response.FromString,
+ ('protos.Devops', 'Login'): fabric_pb2.Response.FromString,
+ ('protos.Devops', 'Query'): fabric_pb2.Response.FromString,
+ }
+ cardinalities = {
+ 'Build': cardinality.Cardinality.UNARY_UNARY,
+ 'Deploy': cardinality.Cardinality.UNARY_UNARY,
+ 'EXP_ExecuteWithBinding': cardinality.Cardinality.UNARY_UNARY,
+ 'EXP_GetApplicationTCert': cardinality.Cardinality.UNARY_UNARY,
+ 'EXP_PrepareForTx': cardinality.Cardinality.UNARY_UNARY,
+ 'EXP_ProduceSigma': cardinality.Cardinality.UNARY_UNARY,
+ 'GetTransactionResult': cardinality.Cardinality.UNARY_UNARY,
+ 'Invoke': cardinality.Cardinality.UNARY_UNARY,
+ 'Login': cardinality.Cardinality.UNARY_UNARY,
+ 'Query': cardinality.Cardinality.UNARY_UNARY,
+ }
+ stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+ return beta_implementations.dynamic_stub(channel, 'protos.Devops', cardinalities, options=stub_options)
+# @@protoc_insertion_point(module_scope)
diff --git a/bddtests/docker-compose-1-devmode.yml b/bddtests/docker-compose-1-devmode.yml
new file mode 100755
index 00000000000..6a9ce286cf3
--- /dev/null
+++ b/bddtests/docker-compose-1-devmode.yml
@@ -0,0 +1,16 @@
+vp0:
+ extends:
+ file: compose-defaults.yml
+ service: vp
+ environment:
+ - CORE_PEER_ID=vp0
+ command: sh -c "peer node start --peer-chaincodedev"
+
+ccenv:
+ image: hyperledger/fabric-ccenv
+ environment:
+ - CORE_CHAINCODE_ID_NAME=testCC
+ - CORE_PEER_ADDRESS=vp0:30303
+ command: bash -c "GOBIN=/opt/gopath/bin go install github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02 && /opt/gopath/bin/chaincode_example02"
+ links:
+ - vp0
\ No newline at end of file
diff --git a/bddtests/docker-compose-1-exp.yml b/bddtests/docker-compose-1-exp.yml
new file mode 100644
index 00000000000..51c30cd4c7d
--- /dev/null
+++ b/bddtests/docker-compose-1-exp.yml
@@ -0,0 +1,8 @@
+vp0:
+ extends:
+ file: compose-defaults.yml
+ service: vp
+ environment:
+ - CORE_PEER_ID=vp0
+ ports:
+ - 31315:31315
diff --git a/bddtests/docker-compose-1-profiling.yml b/bddtests/docker-compose-1-profiling.yml
new file mode 100644
index 00000000000..252e7b793af
--- /dev/null
+++ b/bddtests/docker-compose-1-profiling.yml
@@ -0,0 +1,10 @@
+vp0:
+ extends:
+ file: compose-defaults.yml
+ service: vp
+ environment:
+ - CORE_PEER_ID=vp0
+ - CORE_PEER_PROFILE_ENABLED=true
+ ports:
+ - 5000:6060
+
diff --git a/bddtests/docker-compose-1.yml b/bddtests/docker-compose-1.yml
new file mode 100644
index 00000000000..dbad207779e
--- /dev/null
+++ b/bddtests/docker-compose-1.yml
@@ -0,0 +1,7 @@
+vp0:
+ extends:
+ file: compose-defaults.yml
+ service: vp
+ environment:
+ - CORE_PEER_ID=vp0
+
diff --git a/bddtests/docker-compose-2-tls-basic.yml b/bddtests/docker-compose-2-tls-basic.yml
new file mode 100644
index 00000000000..05aa0e9a407
--- /dev/null
+++ b/bddtests/docker-compose-2-tls-basic.yml
@@ -0,0 +1,19 @@
+vp0:
+ extends:
+ file: compose-defaults.yml
+ service: vp
+ environment:
+ - CORE_PEER_ID=vp0
+ - CORE_PEER_TLS_ENABLED=true
+ - CORE_PEER_TLS_SERVERHOSTOVERRIDE=OBC
+ - CORE_PEER_TLS_CERT_FILE=./bddtests/tlsca.cert
+ - CORE_PEER_TLS_KEY_FILE=./bddtests/tlsca.priv
+
+vp1:
+ extends:
+ service: vp0
+ environment:
+ - CORE_PEER_ID=vp1
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ links:
+ - vp0
diff --git a/bddtests/docker-compose-2.yml b/bddtests/docker-compose-2.yml
new file mode 100644
index 00000000000..d96ba464827
--- /dev/null
+++ b/bddtests/docker-compose-2.yml
@@ -0,0 +1,17 @@
+vp0:
+ extends:
+ file: compose-defaults.yml
+ service: vp
+ environment:
+ - CORE_PEER_ID=vp0
+ - CORE_PEER_DISCOVERY_PERIOD=1s
+ - CORE_PEER_DISCOVERY_TOUCHPERIOD=1s
+
+vp1:
+ extends:
+ service: vp0
+ environment:
+ - CORE_PEER_ID=vp1
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ links:
+ - vp0
diff --git a/bddtests/docker-compose-3.yml b/bddtests/docker-compose-3.yml
new file mode 100644
index 00000000000..61e2e16fd32
--- /dev/null
+++ b/bddtests/docker-compose-3.yml
@@ -0,0 +1,24 @@
+vp0:
+ extends:
+ file: compose-defaults.yml
+ service: vp
+ environment:
+ - CORE_PEER_ID=vp0
+
+vp1:
+ extends:
+ service: vp0
+ environment:
+ - CORE_PEER_ID=vp1
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ links:
+ - vp0
+
+vp2:
+ extends:
+ service: vp0
+ environment:
+ - CORE_PEER_ID=vp2
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ links:
+ - vp0
diff --git a/bddtests/docker-compose-4-consensus-base.yml b/bddtests/docker-compose-4-consensus-base.yml
new file mode 100644
index 00000000000..f855ae29941
--- /dev/null
+++ b/bddtests/docker-compose-4-consensus-base.yml
@@ -0,0 +1,24 @@
+vpBase:
+ extends:
+ file: compose-defaults.yml
+ service: vp
+ environment:
+ - CORE_SECURITY_ENABLED=true
+ - CORE_PEER_PKI_ECA_PADDR=membersrvc0:50051
+ - CORE_PEER_PKI_TCA_PADDR=membersrvc0:50051
+ - CORE_PEER_PKI_TLSCA_PADDR=membersrvc0:50051
+ - CORE_PEER_PKI_TLS_ROOTCERT_FILE=./bddtests/tlsca.cert
+ # TODO: Currently required due to issue reading obbca configuration location
+ - CORE_PBFT_GENERAL_N=4
+ # The checkpoint interval in sequence numbers
+ - CORE_PBFT_GENERAL_K=2
+
+vpBatch:
+ extends:
+ service: vpBase
+ environment:
+ - CORE_PEER_VALIDATOR_CONSENSUS_PLUGIN=pbft
+ - CORE_PBFT_GENERAL_TIMEOUT_REQUEST=10s
+ - CORE_PBFT_GENERAL_MODE=batch
+ # TODO: This is used for testing as to assure deployment goes through to block
+ - CORE_PBFT_GENERAL_BATCHSIZE=1
diff --git a/bddtests/docker-compose-4-consensus-batch-1-byzantine.yml b/bddtests/docker-compose-4-consensus-batch-1-byzantine.yml
new file mode 100644
index 00000000000..bbc5480e81b
--- /dev/null
+++ b/bddtests/docker-compose-4-consensus-batch-1-byzantine.yml
@@ -0,0 +1,55 @@
+membersrvc0:
+ extends:
+ file: compose-defaults.yml
+ service: membersrvc
+
+vp0:
+ extends:
+ file: docker-compose-4-consensus-base.yml
+ service: vpBatch
+ environment:
+ - CORE_PEER_ID=vp0
+ - CORE_SECURITY_ENROLLID=test_vp0
+ - CORE_SECURITY_ENROLLSECRET=MwYpmSRjupbT
+ links:
+ - membersrvc0
+
+vp1:
+ extends:
+ file: docker-compose-4-consensus-base.yml
+ service: vpBatch
+ environment:
+ - CORE_PEER_ID=vp1
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ - CORE_SECURITY_ENROLLID=test_vp1
+ - CORE_SECURITY_ENROLLSECRET=5wgHK9qqYaPy
+ links:
+ - membersrvc0
+ - vp0
+
+vp2:
+ extends:
+ file: docker-compose-4-consensus-base.yml
+ service: vpBatch
+ environment:
+ - CORE_PEER_ID=vp2
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ - CORE_SECURITY_ENROLLID=test_vp2
+ - CORE_SECURITY_ENROLLSECRET=vQelbRvja7cJ
+ links:
+ - membersrvc0
+ - vp0
+
+vp3:
+ extends:
+ file: docker-compose-4-consensus-base.yml
+ service: vpBatch
+ environment:
+ - CORE_PEER_ID=vp3
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ - CORE_SECURITY_ENROLLID=test_vp3
+ - CORE_SECURITY_ENROLLSECRET=9LKqKH5peurL
+ - CORE_PBFT_GENERAL_BYZANTINE=true
+ links:
+ - membersrvc0
+ - vp0
diff --git a/bddtests/docker-compose-4-consensus-batch.yml b/bddtests/docker-compose-4-consensus-batch.yml
new file mode 100644
index 00000000000..b9c498a7f50
--- /dev/null
+++ b/bddtests/docker-compose-4-consensus-batch.yml
@@ -0,0 +1,58 @@
+membersrvc0:
+ extends:
+ file: compose-defaults.yml
+ service: membersrvc
+
+vp0:
+ extends:
+ file: docker-compose-4-consensus-base.yml
+ service: vpBatch
+ environment:
+ - CORE_PEER_ID=vp0
+ - CORE_SECURITY_ENROLLID=test_vp0
+ - CORE_SECURITY_ENROLLSECRET=MwYpmSRjupbT
+ - CORE_PEER_PROFILE_ENABLED=true
+ links:
+ - membersrvc0
+ ports:
+ - 5000:6060
+
+
+vp1:
+ extends:
+ file: docker-compose-4-consensus-base.yml
+ service: vpBatch
+ environment:
+ - CORE_PEER_ID=vp1
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ - CORE_SECURITY_ENROLLID=test_vp1
+ - CORE_SECURITY_ENROLLSECRET=5wgHK9qqYaPy
+ links:
+ - membersrvc0
+ - vp0
+
+vp2:
+ extends:
+ file: docker-compose-4-consensus-base.yml
+ service: vpBatch
+ environment:
+ - CORE_PEER_ID=vp2
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ - CORE_SECURITY_ENROLLID=test_vp2
+ - CORE_SECURITY_ENROLLSECRET=vQelbRvja7cJ
+ links:
+ - membersrvc0
+ - vp0
+
+vp3:
+ extends:
+ file: docker-compose-4-consensus-base.yml
+ service: vpBatch
+ environment:
+ - CORE_PEER_ID=vp3
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ - CORE_SECURITY_ENROLLID=test_vp3
+ - CORE_SECURITY_ENROLLSECRET=9LKqKH5peurL
+ links:
+ - membersrvc0
+ - vp0
diff --git a/bddtests/docker-compose-4-consensus-noops.yml b/bddtests/docker-compose-4-consensus-noops.yml
new file mode 100644
index 00000000000..771905fd1d0
--- /dev/null
+++ b/bddtests/docker-compose-4-consensus-noops.yml
@@ -0,0 +1,56 @@
+membersrvc0:
+ extends:
+ file: compose-defaults.yml
+ service: membersrvc
+
+vp0:
+ extends:
+ file: docker-compose-4-consensus-base.yml
+ service: vpBase
+ environment:
+ - CORE_PEER_ID=vp0
+ - CORE_SECURITY_ENROLLID=test_vp0
+ - CORE_SECURITY_ENROLLSECRET=MwYpmSRjupbT
+ links:
+ - membersrvc0
+ ports:
+ - 5000:5000
+
+vp1:
+ extends:
+ file: docker-compose-4-consensus-base.yml
+ service: vpBase
+ environment:
+ - CORE_PEER_ID=vp1
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ - CORE_SECURITY_ENROLLID=test_vp1
+ - CORE_SECURITY_ENROLLSECRET=5wgHK9qqYaPy
+ links:
+ - membersrvc0
+ - vp0
+
+vp2:
+ extends:
+ file: docker-compose-4-consensus-base.yml
+ service: vpBase
+ environment:
+ - CORE_PEER_ID=vp2
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ - CORE_SECURITY_ENROLLID=test_vp2
+ - CORE_SECURITY_ENROLLSECRET=vQelbRvja7cJ
+ links:
+ - membersrvc0
+ - vp0
+
+vp3:
+ extends:
+ file: docker-compose-4-consensus-base.yml
+ service: vpBase
+ environment:
+ - CORE_PEER_ID=vp3
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ - CORE_SECURITY_ENROLLID=test_vp3
+ - CORE_SECURITY_ENROLLSECRET=9LKqKH5peurL
+ links:
+ - membersrvc0
+ - vp0
diff --git a/bddtests/docker-compose-4-consensus-nvp0.yml b/bddtests/docker-compose-4-consensus-nvp0.yml
new file mode 100644
index 00000000000..2d32b0ddbdb
--- /dev/null
+++ b/bddtests/docker-compose-4-consensus-nvp0.yml
@@ -0,0 +1,14 @@
+nvp0:
+ extends:
+ file: docker-compose-4-consensus-base.yml
+ service: vpBase
+ environment:
+ - CORE_PEER_VALIDATOR_ENABLED=false
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ - CORE_PEER_ID=nvp0
+ - CORE_SECURITY_ENROLLID=test_nvp0
+ - CORE_SECURITY_ENROLLSECRET=iywrPBDEPl0K
+ links:
+ - membersrvc0
+ - vp0
+
diff --git a/bddtests/docker-compose-4-consensus-vp3-byzantine.yml b/bddtests/docker-compose-4-consensus-vp3-byzantine.yml
new file mode 100644
index 00000000000..b64857f60e8
--- /dev/null
+++ b/bddtests/docker-compose-4-consensus-vp3-byzantine.yml
@@ -0,0 +1,3 @@
+vp3:
+ environment:
+ - CORE_PBFT_GENERAL_BYZANTINE=true
diff --git a/bddtests/docker-compose-4.yml b/bddtests/docker-compose-4.yml
new file mode 100644
index 00000000000..914db06f201
--- /dev/null
+++ b/bddtests/docker-compose-4.yml
@@ -0,0 +1,33 @@
+vp0:
+ extends:
+ file: compose-defaults.yml
+ service: vp
+ environment:
+ - CORE_PEER_ID=vp0
+
+vp1:
+ extends:
+ service: vp0
+ environment:
+ - CORE_PEER_ID=vp1
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ links:
+ - vp0
+
+vp2:
+ extends:
+ service: vp0
+ environment:
+ - CORE_PEER_ID=vp2
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ links:
+ - vp0
+
+vp3:
+ extends:
+ service: vp0
+ environment:
+ - CORE_PEER_ID=vp3
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ links:
+ - vp0
diff --git a/bddtests/docker-compose-5.yml b/bddtests/docker-compose-5.yml
new file mode 100644
index 00000000000..869543b9ded
--- /dev/null
+++ b/bddtests/docker-compose-5.yml
@@ -0,0 +1,42 @@
+vp0:
+ extends:
+ file: compose-defaults.yml
+ service: vp
+ environment:
+ - CORE_PEER_ID=vp0
+
+vp1:
+ extends:
+ service: vp0
+ environment:
+ - CORE_PEER_ID=vp1
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ links:
+ - vp0
+
+vp2:
+ extends:
+ service: vp0
+ environment:
+ - CORE_PEER_ID=vp2
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ links:
+ - vp0
+
+vp3:
+ extends:
+ service: vp0
+ environment:
+ - CORE_PEER_ID=vp3
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ links:
+ - vp0
+
+vp4:
+ extends:
+ service: vp0
+ environment:
+ - CORE_PEER_ID=vp4
+ - CORE_PEER_DISCOVERY_ROOTNODE=vp0:30303
+ links:
+ - vp0
diff --git a/bddtests/docker-compose-sdk-node.yml b/bddtests/docker-compose-sdk-node.yml
new file mode 100644
index 00000000000..27239a8a8a6
--- /dev/null
+++ b/bddtests/docker-compose-sdk-node.yml
@@ -0,0 +1,12 @@
+sampleApp0:
+ image: hyperledger-sdk-node
+ environment:
+ - MEMBERSRVC_ADDRESS=membersrvc0:50051
+ - PEER_ADDRESS=vp0:30303
+ - KEY_VALUE_STORE=/tmp/hl_sdk_node_key_value_store
+ # Startup of peer must be delayed to allow membersrvc to come up first
+ command: node sampleSDKApp
+ links:
+ - membersrvc0
+ - vp0
+
diff --git a/bddtests/docker-membersrvc-attributes-enabled.yml b/bddtests/docker-membersrvc-attributes-enabled.yml
new file mode 100644
index 00000000000..0269d51ef7e
--- /dev/null
+++ b/bddtests/docker-membersrvc-attributes-enabled.yml
@@ -0,0 +1,3 @@
+membersrvc0:
+ environment:
+ - MEMBERSRVC_CA_ACA_ENABLED=true
diff --git a/bddtests/docker-membersrvc-attributes-encryption-enabled.yml b/bddtests/docker-membersrvc-attributes-encryption-enabled.yml
new file mode 100644
index 00000000000..6bb3978cee7
--- /dev/null
+++ b/bddtests/docker-membersrvc-attributes-encryption-enabled.yml
@@ -0,0 +1,3 @@
+membersrvc0:
+ environment:
+ - MEMBERSRVC_CA_TCA_ATTRIBUTE-ENCRYPTION_ENABLED=true
diff --git a/bddtests/environment.py b/bddtests/environment.py
new file mode 100644
index 00000000000..c2e3e5bf19a
--- /dev/null
+++ b/bddtests/environment.py
@@ -0,0 +1,79 @@
+import subprocess
+import os
+import glob
+
+from steps.bdd_test_util import cli_call
+
+from steps.coverage import saveCoverageFiles, createCoverageAggregate
+
+def coverageEnabled(context):
+ return context.config.userdata.get("coverage", "false") == "true"
+
+
+def getDockerComposeFileArgsFromYamlFile(compose_yaml):
+ parts = compose_yaml.split()
+ args = []
+ for part in parts:
+ args = args + ["-f"] + [part]
+ return args
+
+def after_scenario(context, scenario):
+ get_logs = context.config.userdata.get("logs", "N")
+ if get_logs.lower() == "force" or (scenario.status == "failed" and get_logs.lower() == "y"):
+ print("Getting container logs for scenario {0} (status: {1})".format(scenario.name, scenario.status))
+ file_suffix = "_" + scenario.name.replace(" ", "_") + ".log"
+ # get logs from the peer containers
+ for containerData in context.compose_containers:
+ with open(containerData.containerName + file_suffix, "w+") as logfile:
+ sys_rc = subprocess.call(["docker", "logs", containerData.containerName], stdout=logfile, stderr=logfile)
+ if sys_rc !=0 :
+ print("Cannot get logs for {0}. Docker rc = {1}".format(containerData.containerName,sys_rc))
+ # get logs from the chaincode containers
+ cc_output, cc_error, cc_returncode = \
+ cli_call(context, ["docker", "ps", "-f", "name=dev-", "--format", "{{.Names}}"], expect_success=True)
+ for containerName in cc_output.splitlines():
+ namePart,sep,junk = containerName.rpartition("-")
+ with open(namePart + file_suffix, "w+") as logfile:
+ sys_rc = subprocess.call(["docker", "logs", containerName], stdout=logfile, stderr=logfile)
+ if sys_rc !=0 :
+ print("Cannot get logs for {0}. Docker rc = {1}".format(namePart,sys_rc))
+ if 'doNotDecompose' in scenario.tags:
+ if 'compose_yaml' in context:
+ print("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
+ else:
+ if 'compose_yaml' in context:
+ fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
+
+ print("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))
+ context.compose_output, context.compose_error, context.compose_returncode = \
+ cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + ["unpause"], expect_success=True)
+ context.compose_output, context.compose_error, context.compose_returncode = \
+ cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + ["stop"], expect_success=True)
+
+ if coverageEnabled(context):
+ #Save the coverage files for this scenario before removing containers
+ containerNames = [containerData.containerName for containerData in context.compose_containers]
+ saveCoverageFiles("coverage", scenario.name.replace(" ", "_"), containerNames, "cov")
+
+ context.compose_output, context.compose_error, context.compose_returncode = \
+ cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + ["rm","-f"], expect_success=True)
+ # now remove any other containers (chaincodes)
+ context.compose_output, context.compose_error, context.compose_returncode = \
+ cli_call(context, ["docker", "ps", "-qa"], expect_success=True)
+ if context.compose_returncode == 0:
+ # Remove each container
+ for containerId in context.compose_output.splitlines():
+ #print("docker rm {0}".format(containerId))
+ context.compose_output, context.compose_error, context.compose_returncode = \
+ cli_call(context, ["docker", "rm", "-f", containerId], expect_success=True)
+
+# stop any running peer that could get in the way before starting the tests
+def before_all(context):
+ cli_call(context, ["../build/bin/peer", "node", "stop"], expect_success=False)
+
+# stop any running peer that could get in the way before starting the tests
+def after_all(context):
+ print("context.failed = {0}".format(context.failed))
+
+ if coverageEnabled(context):
+ createCoverageAggregate()
diff --git a/bddtests/events_pb2.py b/bddtests/events_pb2.py
new file mode 100644
index 00000000000..8903e46b02c
--- /dev/null
+++ b/bddtests/events_pb2.py
@@ -0,0 +1,313 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: events.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+import fabric_pb2 as fabric__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='events.proto',
+ package='protos',
+ syntax='proto3',
+ serialized_pb=_b('\n\x0c\x65vents.proto\x12\x06protos\x1a\x0c\x66\x61\x62ric.proto\"\x88\x01\n\x08Interest\x12\x11\n\teventType\x18\x01 \x01(\t\x12\x33\n\x0cresponseType\x18\x02 \x01(\x0e\x32\x1d.protos.Interest.ResponseType\"4\n\x0cResponseType\x12\x0c\n\x08\x44ONTSEND\x10\x00\x12\x0c\n\x08PROTOBUF\x10\x01\x12\x08\n\x04JSON\x10\x02\",\n\x08Register\x12 \n\x06\x65vents\x18\x01 \x03(\x0b\x32\x10.protos.Interest\"-\n\x07Generic\x12\x11\n\teventType\x18\x01 \x01(\t\x12\x0f\n\x07payload\x18\x02 \x01(\x0c\"z\n\x05\x45vent\x12$\n\x08register\x18\x01 \x01(\x0b\x32\x10.protos.RegisterH\x00\x12\x1e\n\x05\x62lock\x18\x02 \x01(\x0b\x32\r.protos.BlockH\x00\x12\"\n\x07generic\x18\x03 \x01(\x0b\x32\x0f.protos.GenericH\x00\x42\x07\n\x05\x45vent24\n\x06\x45vents\x12*\n\x04\x43hat\x12\r.protos.Event\x1a\r.protos.Event\"\x00(\x01\x30\x01\x62\x06proto3')
+ ,
+ dependencies=[fabric__pb2.DESCRIPTOR,])
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+
+_INTEREST_RESPONSETYPE = _descriptor.EnumDescriptor(
+ name='ResponseType',
+ full_name='protos.Interest.ResponseType',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='DONTSEND', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='PROTOBUF', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='JSON', index=2, number=2,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=123,
+ serialized_end=175,
+)
+_sym_db.RegisterEnumDescriptor(_INTEREST_RESPONSETYPE)
+
+
+_INTEREST = _descriptor.Descriptor(
+ name='Interest',
+ full_name='protos.Interest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='eventType', full_name='protos.Interest.eventType', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='responseType', full_name='protos.Interest.responseType', index=1,
+ number=2, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ _INTEREST_RESPONSETYPE,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=39,
+ serialized_end=175,
+)
+
+
+_REGISTER = _descriptor.Descriptor(
+ name='Register',
+ full_name='protos.Register',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='events', full_name='protos.Register.events', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=177,
+ serialized_end=221,
+)
+
+
+_GENERIC = _descriptor.Descriptor(
+ name='Generic',
+ full_name='protos.Generic',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='eventType', full_name='protos.Generic.eventType', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='payload', full_name='protos.Generic.payload', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=223,
+ serialized_end=268,
+)
+
+
+_EVENT = _descriptor.Descriptor(
+ name='Event',
+ full_name='protos.Event',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='register', full_name='protos.Event.register', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='block', full_name='protos.Event.block', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='generic', full_name='protos.Event.generic', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ _descriptor.OneofDescriptor(
+ name='Event', full_name='protos.Event.Event',
+ index=0, containing_type=None, fields=[]),
+ ],
+ serialized_start=270,
+ serialized_end=392,
+)
+
+_INTEREST.fields_by_name['responseType'].enum_type = _INTEREST_RESPONSETYPE
+_INTEREST_RESPONSETYPE.containing_type = _INTEREST
+_REGISTER.fields_by_name['events'].message_type = _INTEREST
+_EVENT.fields_by_name['register'].message_type = _REGISTER
+_EVENT.fields_by_name['block'].message_type = fabric__pb2._BLOCK
+_EVENT.fields_by_name['generic'].message_type = _GENERIC
+_EVENT.oneofs_by_name['Event'].fields.append(
+ _EVENT.fields_by_name['register'])
+_EVENT.fields_by_name['register'].containing_oneof = _EVENT.oneofs_by_name['Event']
+_EVENT.oneofs_by_name['Event'].fields.append(
+ _EVENT.fields_by_name['block'])
+_EVENT.fields_by_name['block'].containing_oneof = _EVENT.oneofs_by_name['Event']
+_EVENT.oneofs_by_name['Event'].fields.append(
+ _EVENT.fields_by_name['generic'])
+_EVENT.fields_by_name['generic'].containing_oneof = _EVENT.oneofs_by_name['Event']
+DESCRIPTOR.message_types_by_name['Interest'] = _INTEREST
+DESCRIPTOR.message_types_by_name['Register'] = _REGISTER
+DESCRIPTOR.message_types_by_name['Generic'] = _GENERIC
+DESCRIPTOR.message_types_by_name['Event'] = _EVENT
+
+Interest = _reflection.GeneratedProtocolMessageType('Interest', (_message.Message,), dict(
+ DESCRIPTOR = _INTEREST,
+ __module__ = 'events_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Interest)
+ ))
+_sym_db.RegisterMessage(Interest)
+
+Register = _reflection.GeneratedProtocolMessageType('Register', (_message.Message,), dict(
+ DESCRIPTOR = _REGISTER,
+ __module__ = 'events_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Register)
+ ))
+_sym_db.RegisterMessage(Register)
+
+Generic = _reflection.GeneratedProtocolMessageType('Generic', (_message.Message,), dict(
+ DESCRIPTOR = _GENERIC,
+ __module__ = 'events_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Generic)
+ ))
+_sym_db.RegisterMessage(Generic)
+
+Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict(
+ DESCRIPTOR = _EVENT,
+ __module__ = 'events_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Event)
+ ))
+_sym_db.RegisterMessage(Event)
+
+
+import abc
+import six
+from grpc.beta import implementations as beta_implementations
+from grpc.beta import interfaces as beta_interfaces
+from grpc.framework.common import cardinality
+from grpc.framework.interfaces.face import utilities as face_utilities
+
+class BetaEventsServicer(object):
+ """Interface exported by the events server
+ """
+ def Chat(self, request_iterator, context):
+ """event chatting using Event
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+
+class BetaEventsStub(object):
+ """Interface exported by the events server
+ """
+ def Chat(self, request_iterator, timeout):
+ """event chatting using Event
+ """
+ raise NotImplementedError()
+
+def beta_create_Events_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+ import events_pb2
+ import events_pb2
+ request_deserializers = {
+ ('protos.Events', 'Chat'): events_pb2.Event.FromString,
+ }
+ response_serializers = {
+ ('protos.Events', 'Chat'): events_pb2.Event.SerializeToString,
+ }
+ method_implementations = {
+ ('protos.Events', 'Chat'): face_utilities.stream_stream_inline(servicer.Chat),
+ }
+ server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+ return beta_implementations.server(method_implementations, options=server_options)
+
+def beta_create_Events_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+ import events_pb2
+ import events_pb2
+ request_serializers = {
+ ('protos.Events', 'Chat'): events_pb2.Event.SerializeToString,
+ }
+ response_deserializers = {
+ ('protos.Events', 'Chat'): events_pb2.Event.FromString,
+ }
+ cardinalities = {
+ 'Chat': cardinality.Cardinality.STREAM_STREAM,
+ }
+ stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+ return beta_implementations.dynamic_stub(channel, 'protos.Events', cardinalities, options=stub_options)
+# @@protoc_insertion_point(module_scope)
diff --git a/bddtests/fabric_pb2.py b/bddtests/fabric_pb2.py
new file mode 100644
index 00000000000..b3b87b7ad5b
--- /dev/null
+++ b/bddtests/fabric_pb2.py
@@ -0,0 +1,1354 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: fabric.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+import chaincode_pb2 as chaincode__pb2
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='fabric.proto',
+ package='protos',
+ syntax='proto3',
+ serialized_pb=_b('\n\x0c\x66\x61\x62ric.proto\x12\x06protos\x1a\x0f\x63haincode.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xc5\x03\n\x0bTransaction\x12&\n\x04type\x18\x01 \x01(\x0e\x32\x18.protos.Transaction.Type\x12\x13\n\x0b\x63haincodeID\x18\x02 \x01(\x0c\x12\x0f\n\x07payload\x18\x03 \x01(\x0c\x12\x10\n\x08metadata\x18\x04 \x01(\x0c\x12\x0c\n\x04uuid\x18\x05 \x01(\t\x12-\n\ttimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x14\x63onfidentialityLevel\x18\x07 \x01(\x0e\x32\x1c.protos.ConfidentialityLevel\x12&\n\x1e\x63onfidentialityProtocolVersion\x18\x08 \x01(\t\x12\r\n\x05nonce\x18\t \x01(\x0c\x12\x14\n\x0ctoValidators\x18\n \x01(\x0c\x12\x0c\n\x04\x63\x65rt\x18\x0b \x01(\x0c\x12\x11\n\tsignature\x18\x0c \x01(\x0c\"o\n\x04Type\x12\r\n\tUNDEFINED\x10\x00\x12\x14\n\x10\x43HAINCODE_DEPLOY\x10\x01\x12\x14\n\x10\x43HAINCODE_INVOKE\x10\x02\x12\x13\n\x0f\x43HAINCODE_QUERY\x10\x03\x12\x17\n\x13\x43HAINCODE_TERMINATE\x10\x04\"=\n\x10TransactionBlock\x12)\n\x0ctransactions\x18\x01 \x03(\x0b\x32\x13.protos.Transaction\"S\n\x11TransactionResult\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x0e\n\x06result\x18\x02 \x01(\x0c\x12\x11\n\terrorCode\x18\x03 \x01(\r\x12\r\n\x05\x65rror\x18\x04 \x01(\t\"\xe5\x01\n\x05\x42lock\x12\x0f\n\x07version\x18\x01 \x01(\r\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x0ctransactions\x18\x03 \x03(\x0b\x32\x13.protos.Transaction\x12\x11\n\tstateHash\x18\x04 \x01(\x0c\x12\x19\n\x11previousBlockHash\x18\x05 \x01(\x0c\x12\x19\n\x11\x63onsensusMetadata\x18\x06 \x01(\x0c\x12(\n\x0bnonHashData\x18\x07 \x01(\x0b\x32\x13.protos.NonHashData\"U\n\x0e\x42lockchainInfo\x12\x0e\n\x06height\x18\x01 \x01(\x04\x12\x18\n\x10\x63urrentBlockHash\x18\x02 \x01(\x0c\x12\x19\n\x11previousBlockHash\x18\x03 \x01(\x0c\"\x84\x01\n\x0bNonHashData\x12>\n\x1alocalLedgerCommitTimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x35\n\x12transactionResults\x18\x02 
\x03(\x0b\x32\x19.protos.TransactionResult\")\n\x0bPeerAddress\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\"\x16\n\x06PeerID\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xac\x01\n\x0cPeerEndpoint\x12\x1a\n\x02ID\x18\x01 \x01(\x0b\x32\x0e.protos.PeerID\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\'\n\x04type\x18\x03 \x01(\x0e\x32\x19.protos.PeerEndpoint.Type\x12\r\n\x05pkiID\x18\x04 \x01(\x0c\"7\n\x04Type\x12\r\n\tUNDEFINED\x10\x00\x12\r\n\tVALIDATOR\x10\x01\x12\x11\n\rNON_VALIDATOR\x10\x02\"3\n\x0cPeersMessage\x12#\n\x05peers\x18\x01 \x03(\x0b\x32\x14.protos.PeerEndpoint\"j\n\x0cHelloMessage\x12*\n\x0cpeerEndpoint\x18\x01 \x01(\x0b\x32\x14.protos.PeerEndpoint\x12.\n\x0e\x62lockchainInfo\x18\x02 \x01(\x0b\x32\x16.protos.BlockchainInfo\"\xca\x03\n\x07Message\x12\"\n\x04type\x18\x01 \x01(\x0e\x32\x14.protos.Message.Type\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07payload\x18\x03 \x01(\x0c\x12\x11\n\tsignature\x18\x04 \x01(\x0c\"\xc7\x02\n\x04Type\x12\r\n\tUNDEFINED\x10\x00\x12\x0e\n\nDISC_HELLO\x10\x01\x12\x13\n\x0f\x44ISC_DISCONNECT\x10\x02\x12\x12\n\x0e\x44ISC_GET_PEERS\x10\x03\x12\x0e\n\nDISC_PEERS\x10\x04\x12\x0f\n\x0b\x44ISC_NEWMSG\x10\x05\x12\x15\n\x11\x43HAIN_TRANSACTION\x10\x06\x12\x13\n\x0fSYNC_GET_BLOCKS\x10\x0b\x12\x0f\n\x0bSYNC_BLOCKS\x10\x0c\x12\x14\n\x10SYNC_BLOCK_ADDED\x10\r\x12\x1b\n\x17SYNC_STATE_GET_SNAPSHOT\x10\x0e\x12\x17\n\x13SYNC_STATE_SNAPSHOT\x10\x0f\x12\x19\n\x15SYNC_STATE_GET_DELTAS\x10\x10\x12\x15\n\x11SYNC_STATE_DELTAS\x10\x11\x12\x0c\n\x08RESPONSE\x10\x14\x12\r\n\tCONSENSUS\x10\x15\"}\n\x08Response\x12+\n\x06status\x18\x01 \x01(\x0e\x32\x1b.protos.Response.StatusCode\x12\x0b\n\x03msg\x18\x02 \x01(\x0c\"7\n\nStatusCode\x12\r\n\tUNDEFINED\x10\x00\x12\x0c\n\x07SUCCESS\x10\xc8\x01\x12\x0c\n\x07\x46\x41ILURE\x10\xf4\x03\">\n\nBlockState\x12\x1c\n\x05\x62lock\x18\x01 \x01(\x0b\x32\r.protos.Block\x12\x12\n\nstateDelta\x18\x02 \x01(\x0c\",\n\x0eSyncBlockRange\x12\r\n\x05start\x18\x01 
\x01(\x04\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x04\"R\n\nSyncBlocks\x12%\n\x05range\x18\x01 \x01(\x0b\x32\x16.protos.SyncBlockRange\x12\x1d\n\x06\x62locks\x18\x02 \x03(\x0b\x32\r.protos.Block\"1\n\x18SyncStateSnapshotRequest\x12\x15\n\rcorrelationId\x18\x01 \x01(\x04\"|\n\x11SyncStateSnapshot\x12\r\n\x05\x64\x65lta\x18\x01 \x01(\x0c\x12\x10\n\x08sequence\x18\x02 \x01(\x04\x12\x13\n\x0b\x62lockNumber\x18\x03 \x01(\x04\x12\x31\n\x07request\x18\x04 \x01(\x0b\x32 .protos.SyncStateSnapshotRequest\"?\n\x16SyncStateDeltasRequest\x12%\n\x05range\x18\x01 \x01(\x0b\x32\x16.protos.SyncBlockRange\"H\n\x0fSyncStateDeltas\x12%\n\x05range\x18\x01 \x01(\x0b\x32\x16.protos.SyncBlockRange\x12\x0e\n\x06\x64\x65ltas\x18\x02 \x03(\x0c\x32u\n\x04Peer\x12.\n\x04\x43hat\x12\x0f.protos.Message\x1a\x0f.protos.Message\"\x00(\x01\x30\x01\x12=\n\x12ProcessTransaction\x12\x13.protos.Transaction\x1a\x10.protos.Response\"\x00\x62\x06proto3')
+ ,
+ dependencies=[chaincode__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+
+_TRANSACTION_TYPE = _descriptor.EnumDescriptor(
+ name='Type',
+ full_name='protos.Transaction.Type',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='UNDEFINED', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='CHAINCODE_DEPLOY', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='CHAINCODE_INVOKE', index=2, number=2,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='CHAINCODE_QUERY', index=3, number=3,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='CHAINCODE_TERMINATE', index=4, number=4,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=417,
+ serialized_end=528,
+)
+_sym_db.RegisterEnumDescriptor(_TRANSACTION_TYPE)
+
+_PEERENDPOINT_TYPE = _descriptor.EnumDescriptor(
+ name='Type',
+ full_name='protos.PeerEndpoint.Type',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='UNDEFINED', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='VALIDATOR', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='NON_VALIDATOR', index=2, number=2,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=1317,
+ serialized_end=1372,
+)
+_sym_db.RegisterEnumDescriptor(_PEERENDPOINT_TYPE)
+
+_MESSAGE_TYPE = _descriptor.EnumDescriptor(
+ name='Type',
+ full_name='protos.Message.Type',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='UNDEFINED', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='DISC_HELLO', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='DISC_DISCONNECT', index=2, number=2,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='DISC_GET_PEERS', index=3, number=3,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='DISC_PEERS', index=4, number=4,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='DISC_NEWMSG', index=5, number=5,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='CHAIN_TRANSACTION', index=6, number=6,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SYNC_GET_BLOCKS', index=7, number=11,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SYNC_BLOCKS', index=8, number=12,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SYNC_BLOCK_ADDED', index=9, number=13,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SYNC_STATE_GET_SNAPSHOT', index=10, number=14,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SYNC_STATE_SNAPSHOT', index=11, number=15,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SYNC_STATE_GET_DELTAS', index=12, number=16,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SYNC_STATE_DELTAS', index=13, number=17,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='RESPONSE', index=14, number=20,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='CONSENSUS', index=15, number=21,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=1667,
+ serialized_end=1994,
+)
+_sym_db.RegisterEnumDescriptor(_MESSAGE_TYPE)
+
+_RESPONSE_STATUSCODE = _descriptor.EnumDescriptor(
+ name='StatusCode',
+ full_name='protos.Response.StatusCode',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='UNDEFINED', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SUCCESS', index=1, number=200,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='FAILURE', index=2, number=500,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=2066,
+ serialized_end=2121,
+)
+_sym_db.RegisterEnumDescriptor(_RESPONSE_STATUSCODE)
+
+
+_TRANSACTION = _descriptor.Descriptor(
+ name='Transaction',
+ full_name='protos.Transaction',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='type', full_name='protos.Transaction.type', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='chaincodeID', full_name='protos.Transaction.chaincodeID', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='payload', full_name='protos.Transaction.payload', index=2,
+ number=3, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='metadata', full_name='protos.Transaction.metadata', index=3,
+ number=4, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='uuid', full_name='protos.Transaction.uuid', index=4,
+ number=5, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='timestamp', full_name='protos.Transaction.timestamp', index=5,
+ number=6, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='confidentialityLevel', full_name='protos.Transaction.confidentialityLevel', index=6,
+ number=7, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='confidentialityProtocolVersion', full_name='protos.Transaction.confidentialityProtocolVersion', index=7,
+ number=8, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='nonce', full_name='protos.Transaction.nonce', index=8,
+ number=9, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='toValidators', full_name='protos.Transaction.toValidators', index=9,
+ number=10, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='cert', full_name='protos.Transaction.cert', index=10,
+ number=11, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='signature', full_name='protos.Transaction.signature', index=11,
+ number=12, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ _TRANSACTION_TYPE,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=75,
+ serialized_end=528,
+)
+
+
+_TRANSACTIONBLOCK = _descriptor.Descriptor(
+ name='TransactionBlock',
+ full_name='protos.TransactionBlock',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='transactions', full_name='protos.TransactionBlock.transactions', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=530,
+ serialized_end=591,
+)
+
+
+_TRANSACTIONRESULT = _descriptor.Descriptor(
+ name='TransactionResult',
+ full_name='protos.TransactionResult',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='uuid', full_name='protos.TransactionResult.uuid', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='result', full_name='protos.TransactionResult.result', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='errorCode', full_name='protos.TransactionResult.errorCode', index=2,
+ number=3, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='error', full_name='protos.TransactionResult.error', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=593,
+ serialized_end=676,
+)
+
+
+_BLOCK = _descriptor.Descriptor(
+ name='Block',
+ full_name='protos.Block',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='version', full_name='protos.Block.version', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='timestamp', full_name='protos.Block.timestamp', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='transactions', full_name='protos.Block.transactions', index=2,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='stateHash', full_name='protos.Block.stateHash', index=3,
+ number=4, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='previousBlockHash', full_name='protos.Block.previousBlockHash', index=4,
+ number=5, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='consensusMetadata', full_name='protos.Block.consensusMetadata', index=5,
+ number=6, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='nonHashData', full_name='protos.Block.nonHashData', index=6,
+ number=7, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=679,
+ serialized_end=908,
+)
+
+
+_BLOCKCHAININFO = _descriptor.Descriptor(
+ name='BlockchainInfo',
+ full_name='protos.BlockchainInfo',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='height', full_name='protos.BlockchainInfo.height', index=0,
+ number=1, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='currentBlockHash', full_name='protos.BlockchainInfo.currentBlockHash', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='previousBlockHash', full_name='protos.BlockchainInfo.previousBlockHash', index=2,
+ number=3, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=910,
+ serialized_end=995,
+)
+
+
+_NONHASHDATA = _descriptor.Descriptor(
+ name='NonHashData',
+ full_name='protos.NonHashData',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='localLedgerCommitTimestamp', full_name='protos.NonHashData.localLedgerCommitTimestamp', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='transactionResults', full_name='protos.NonHashData.transactionResults', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=998,
+ serialized_end=1130,
+)
+
+
+_PEERADDRESS = _descriptor.Descriptor(
+ name='PeerAddress',
+ full_name='protos.PeerAddress',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='host', full_name='protos.PeerAddress.host', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='port', full_name='protos.PeerAddress.port', index=1,
+ number=2, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1132,
+ serialized_end=1173,
+)
+
+
+_PEERID = _descriptor.Descriptor(
+ name='PeerID',
+ full_name='protos.PeerID',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='protos.PeerID.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1175,
+ serialized_end=1197,
+)
+
+
+_PEERENDPOINT = _descriptor.Descriptor(
+ name='PeerEndpoint',
+ full_name='protos.PeerEndpoint',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='ID', full_name='protos.PeerEndpoint.ID', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='address', full_name='protos.PeerEndpoint.address', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='type', full_name='protos.PeerEndpoint.type', index=2,
+ number=3, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='pkiID', full_name='protos.PeerEndpoint.pkiID', index=3,
+ number=4, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ _PEERENDPOINT_TYPE,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1200,
+ serialized_end=1372,
+)
+
+
+_PEERSMESSAGE = _descriptor.Descriptor(
+ name='PeersMessage',
+ full_name='protos.PeersMessage',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='peers', full_name='protos.PeersMessage.peers', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1374,
+ serialized_end=1425,
+)
+
+
+_HELLOMESSAGE = _descriptor.Descriptor(
+ name='HelloMessage',
+ full_name='protos.HelloMessage',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='peerEndpoint', full_name='protos.HelloMessage.peerEndpoint', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='blockchainInfo', full_name='protos.HelloMessage.blockchainInfo', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1427,
+ serialized_end=1533,
+)
+
+
+_MESSAGE = _descriptor.Descriptor(
+ name='Message',
+ full_name='protos.Message',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='type', full_name='protos.Message.type', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='timestamp', full_name='protos.Message.timestamp', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='payload', full_name='protos.Message.payload', index=2,
+ number=3, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='signature', full_name='protos.Message.signature', index=3,
+ number=4, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ _MESSAGE_TYPE,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1536,
+ serialized_end=1994,
+)
+
+
+_RESPONSE = _descriptor.Descriptor(
+ name='Response',
+ full_name='protos.Response',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='status', full_name='protos.Response.status', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='msg', full_name='protos.Response.msg', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ _RESPONSE_STATUSCODE,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1996,
+ serialized_end=2121,
+)
+
+
+_BLOCKSTATE = _descriptor.Descriptor(
+ name='BlockState',
+ full_name='protos.BlockState',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='block', full_name='protos.BlockState.block', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='stateDelta', full_name='protos.BlockState.stateDelta', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2123,
+ serialized_end=2185,
+)
+
+
+_SYNCBLOCKRANGE = _descriptor.Descriptor(
+ name='SyncBlockRange',
+ full_name='protos.SyncBlockRange',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='start', full_name='protos.SyncBlockRange.start', index=0,
+ number=1, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='end', full_name='protos.SyncBlockRange.end', index=1,
+ number=2, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2187,
+ serialized_end=2231,
+)
+
+
+_SYNCBLOCKS = _descriptor.Descriptor(
+ name='SyncBlocks',
+ full_name='protos.SyncBlocks',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='range', full_name='protos.SyncBlocks.range', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='blocks', full_name='protos.SyncBlocks.blocks', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2233,
+ serialized_end=2315,
+)
+
+
+_SYNCSTATESNAPSHOTREQUEST = _descriptor.Descriptor(
+ name='SyncStateSnapshotRequest',
+ full_name='protos.SyncStateSnapshotRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='correlationId', full_name='protos.SyncStateSnapshotRequest.correlationId', index=0,
+ number=1, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2317,
+ serialized_end=2366,
+)
+
+
+_SYNCSTATESNAPSHOT = _descriptor.Descriptor(
+ name='SyncStateSnapshot',
+ full_name='protos.SyncStateSnapshot',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='delta', full_name='protos.SyncStateSnapshot.delta', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sequence', full_name='protos.SyncStateSnapshot.sequence', index=1,
+ number=2, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='blockNumber', full_name='protos.SyncStateSnapshot.blockNumber', index=2,
+ number=3, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='request', full_name='protos.SyncStateSnapshot.request', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2368,
+ serialized_end=2492,
+)
+
+
+_SYNCSTATEDELTASREQUEST = _descriptor.Descriptor(
+ name='SyncStateDeltasRequest',
+ full_name='protos.SyncStateDeltasRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='range', full_name='protos.SyncStateDeltasRequest.range', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2494,
+ serialized_end=2557,
+)
+
+
+_SYNCSTATEDELTAS = _descriptor.Descriptor(
+ name='SyncStateDeltas',
+ full_name='protos.SyncStateDeltas',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='range', full_name='protos.SyncStateDeltas.range', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='deltas', full_name='protos.SyncStateDeltas.deltas', index=1,
+ number=2, type=12, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2559,
+ serialized_end=2631,
+)
+
+_TRANSACTION.fields_by_name['type'].enum_type = _TRANSACTION_TYPE
+_TRANSACTION.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_TRANSACTION.fields_by_name['confidentialityLevel'].enum_type = chaincode__pb2._CONFIDENTIALITYLEVEL
+_TRANSACTION_TYPE.containing_type = _TRANSACTION
+_TRANSACTIONBLOCK.fields_by_name['transactions'].message_type = _TRANSACTION
+_BLOCK.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_BLOCK.fields_by_name['transactions'].message_type = _TRANSACTION
+_BLOCK.fields_by_name['nonHashData'].message_type = _NONHASHDATA
+_NONHASHDATA.fields_by_name['localLedgerCommitTimestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_NONHASHDATA.fields_by_name['transactionResults'].message_type = _TRANSACTIONRESULT
+_PEERENDPOINT.fields_by_name['ID'].message_type = _PEERID
+_PEERENDPOINT.fields_by_name['type'].enum_type = _PEERENDPOINT_TYPE
+_PEERENDPOINT_TYPE.containing_type = _PEERENDPOINT
+_PEERSMESSAGE.fields_by_name['peers'].message_type = _PEERENDPOINT
+_HELLOMESSAGE.fields_by_name['peerEndpoint'].message_type = _PEERENDPOINT
+_HELLOMESSAGE.fields_by_name['blockchainInfo'].message_type = _BLOCKCHAININFO
+_MESSAGE.fields_by_name['type'].enum_type = _MESSAGE_TYPE
+_MESSAGE.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_MESSAGE_TYPE.containing_type = _MESSAGE
+_RESPONSE.fields_by_name['status'].enum_type = _RESPONSE_STATUSCODE
+_RESPONSE_STATUSCODE.containing_type = _RESPONSE
+_BLOCKSTATE.fields_by_name['block'].message_type = _BLOCK
+_SYNCBLOCKS.fields_by_name['range'].message_type = _SYNCBLOCKRANGE
+_SYNCBLOCKS.fields_by_name['blocks'].message_type = _BLOCK
+_SYNCSTATESNAPSHOT.fields_by_name['request'].message_type = _SYNCSTATESNAPSHOTREQUEST
+_SYNCSTATEDELTASREQUEST.fields_by_name['range'].message_type = _SYNCBLOCKRANGE
+_SYNCSTATEDELTAS.fields_by_name['range'].message_type = _SYNCBLOCKRANGE
+DESCRIPTOR.message_types_by_name['Transaction'] = _TRANSACTION
+DESCRIPTOR.message_types_by_name['TransactionBlock'] = _TRANSACTIONBLOCK
+DESCRIPTOR.message_types_by_name['TransactionResult'] = _TRANSACTIONRESULT
+DESCRIPTOR.message_types_by_name['Block'] = _BLOCK
+DESCRIPTOR.message_types_by_name['BlockchainInfo'] = _BLOCKCHAININFO
+DESCRIPTOR.message_types_by_name['NonHashData'] = _NONHASHDATA
+DESCRIPTOR.message_types_by_name['PeerAddress'] = _PEERADDRESS
+DESCRIPTOR.message_types_by_name['PeerID'] = _PEERID
+DESCRIPTOR.message_types_by_name['PeerEndpoint'] = _PEERENDPOINT
+DESCRIPTOR.message_types_by_name['PeersMessage'] = _PEERSMESSAGE
+DESCRIPTOR.message_types_by_name['HelloMessage'] = _HELLOMESSAGE
+DESCRIPTOR.message_types_by_name['Message'] = _MESSAGE
+DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
+DESCRIPTOR.message_types_by_name['BlockState'] = _BLOCKSTATE
+DESCRIPTOR.message_types_by_name['SyncBlockRange'] = _SYNCBLOCKRANGE
+DESCRIPTOR.message_types_by_name['SyncBlocks'] = _SYNCBLOCKS
+DESCRIPTOR.message_types_by_name['SyncStateSnapshotRequest'] = _SYNCSTATESNAPSHOTREQUEST
+DESCRIPTOR.message_types_by_name['SyncStateSnapshot'] = _SYNCSTATESNAPSHOT
+DESCRIPTOR.message_types_by_name['SyncStateDeltasRequest'] = _SYNCSTATEDELTASREQUEST
+DESCRIPTOR.message_types_by_name['SyncStateDeltas'] = _SYNCSTATEDELTAS
+
+Transaction = _reflection.GeneratedProtocolMessageType('Transaction', (_message.Message,), dict(
+ DESCRIPTOR = _TRANSACTION,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Transaction)
+ ))
+_sym_db.RegisterMessage(Transaction)
+
+TransactionBlock = _reflection.GeneratedProtocolMessageType('TransactionBlock', (_message.Message,), dict(
+ DESCRIPTOR = _TRANSACTIONBLOCK,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TransactionBlock)
+ ))
+_sym_db.RegisterMessage(TransactionBlock)
+
+TransactionResult = _reflection.GeneratedProtocolMessageType('TransactionResult', (_message.Message,), dict(
+ DESCRIPTOR = _TRANSACTIONRESULT,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.TransactionResult)
+ ))
+_sym_db.RegisterMessage(TransactionResult)
+
+Block = _reflection.GeneratedProtocolMessageType('Block', (_message.Message,), dict(
+ DESCRIPTOR = _BLOCK,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Block)
+ ))
+_sym_db.RegisterMessage(Block)
+
+BlockchainInfo = _reflection.GeneratedProtocolMessageType('BlockchainInfo', (_message.Message,), dict(
+ DESCRIPTOR = _BLOCKCHAININFO,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.BlockchainInfo)
+ ))
+_sym_db.RegisterMessage(BlockchainInfo)
+
+NonHashData = _reflection.GeneratedProtocolMessageType('NonHashData', (_message.Message,), dict(
+ DESCRIPTOR = _NONHASHDATA,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.NonHashData)
+ ))
+_sym_db.RegisterMessage(NonHashData)
+
+PeerAddress = _reflection.GeneratedProtocolMessageType('PeerAddress', (_message.Message,), dict(
+ DESCRIPTOR = _PEERADDRESS,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.PeerAddress)
+ ))
+_sym_db.RegisterMessage(PeerAddress)
+
+PeerID = _reflection.GeneratedProtocolMessageType('PeerID', (_message.Message,), dict(
+ DESCRIPTOR = _PEERID,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.PeerID)
+ ))
+_sym_db.RegisterMessage(PeerID)
+
+PeerEndpoint = _reflection.GeneratedProtocolMessageType('PeerEndpoint', (_message.Message,), dict(
+ DESCRIPTOR = _PEERENDPOINT,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.PeerEndpoint)
+ ))
+_sym_db.RegisterMessage(PeerEndpoint)
+
+PeersMessage = _reflection.GeneratedProtocolMessageType('PeersMessage', (_message.Message,), dict(
+ DESCRIPTOR = _PEERSMESSAGE,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.PeersMessage)
+ ))
+_sym_db.RegisterMessage(PeersMessage)
+
+HelloMessage = _reflection.GeneratedProtocolMessageType('HelloMessage', (_message.Message,), dict(
+ DESCRIPTOR = _HELLOMESSAGE,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.HelloMessage)
+ ))
+_sym_db.RegisterMessage(HelloMessage)
+
+Message = _reflection.GeneratedProtocolMessageType('Message', (_message.Message,), dict(
+ DESCRIPTOR = _MESSAGE,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Message)
+ ))
+_sym_db.RegisterMessage(Message)
+
+Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
+ DESCRIPTOR = _RESPONSE,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.Response)
+ ))
+_sym_db.RegisterMessage(Response)
+
+BlockState = _reflection.GeneratedProtocolMessageType('BlockState', (_message.Message,), dict(
+ DESCRIPTOR = _BLOCKSTATE,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.BlockState)
+ ))
+_sym_db.RegisterMessage(BlockState)
+
+SyncBlockRange = _reflection.GeneratedProtocolMessageType('SyncBlockRange', (_message.Message,), dict(
+ DESCRIPTOR = _SYNCBLOCKRANGE,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.SyncBlockRange)
+ ))
+_sym_db.RegisterMessage(SyncBlockRange)
+
+SyncBlocks = _reflection.GeneratedProtocolMessageType('SyncBlocks', (_message.Message,), dict(
+ DESCRIPTOR = _SYNCBLOCKS,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.SyncBlocks)
+ ))
+_sym_db.RegisterMessage(SyncBlocks)
+
+SyncStateSnapshotRequest = _reflection.GeneratedProtocolMessageType('SyncStateSnapshotRequest', (_message.Message,), dict(
+ DESCRIPTOR = _SYNCSTATESNAPSHOTREQUEST,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.SyncStateSnapshotRequest)
+ ))
+_sym_db.RegisterMessage(SyncStateSnapshotRequest)
+
+SyncStateSnapshot = _reflection.GeneratedProtocolMessageType('SyncStateSnapshot', (_message.Message,), dict(
+ DESCRIPTOR = _SYNCSTATESNAPSHOT,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.SyncStateSnapshot)
+ ))
+_sym_db.RegisterMessage(SyncStateSnapshot)
+
+SyncStateDeltasRequest = _reflection.GeneratedProtocolMessageType('SyncStateDeltasRequest', (_message.Message,), dict(
+ DESCRIPTOR = _SYNCSTATEDELTASREQUEST,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.SyncStateDeltasRequest)
+ ))
+_sym_db.RegisterMessage(SyncStateDeltasRequest)
+
+SyncStateDeltas = _reflection.GeneratedProtocolMessageType('SyncStateDeltas', (_message.Message,), dict(
+ DESCRIPTOR = _SYNCSTATEDELTAS,
+ __module__ = 'fabric_pb2'
+ # @@protoc_insertion_point(class_scope:protos.SyncStateDeltas)
+ ))
+_sym_db.RegisterMessage(SyncStateDeltas)
+
+
+import abc
+import six
+from grpc.beta import implementations as beta_implementations
+from grpc.beta import interfaces as beta_interfaces
+from grpc.framework.common import cardinality
+from grpc.framework.interfaces.face import utilities as face_utilities
+
+class BetaPeerServicer(object):
+ """Interface exported by the server.
+ """
+ def Chat(self, request_iterator, context):
+ """Accepts a stream of Message during chat session, while receiving
+ other Message (e.g. from other peers).
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def ProcessTransaction(self, request, context):
+ """Process a transaction from a remote source.
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+
+class BetaPeerStub(object):
+ """Interface exported by the server.
+ """
+ def Chat(self, request_iterator, timeout):
+ """Accepts a stream of Message during chat session, while receiving
+ other Message (e.g. from other peers).
+ """
+ raise NotImplementedError()
+ def ProcessTransaction(self, request, timeout):
+ """Process a transaction from a remote source.
+ """
+ raise NotImplementedError()
+ ProcessTransaction.future = None
+
+def beta_create_Peer_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+ import fabric_pb2
+ import fabric_pb2
+ import fabric_pb2
+ import fabric_pb2
+ request_deserializers = {
+ ('protos.Peer', 'Chat'): fabric_pb2.Message.FromString,
+ ('protos.Peer', 'ProcessTransaction'): fabric_pb2.Transaction.FromString,
+ }
+ response_serializers = {
+ ('protos.Peer', 'Chat'): fabric_pb2.Message.SerializeToString,
+ ('protos.Peer', 'ProcessTransaction'): fabric_pb2.Response.SerializeToString,
+ }
+ method_implementations = {
+ ('protos.Peer', 'Chat'): face_utilities.stream_stream_inline(servicer.Chat),
+ ('protos.Peer', 'ProcessTransaction'): face_utilities.unary_unary_inline(servicer.ProcessTransaction),
+ }
+ server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+ return beta_implementations.server(method_implementations, options=server_options)
+
+def beta_create_Peer_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+ import fabric_pb2
+ import fabric_pb2
+ import fabric_pb2
+ import fabric_pb2
+ request_serializers = {
+ ('protos.Peer', 'Chat'): fabric_pb2.Message.SerializeToString,
+ ('protos.Peer', 'ProcessTransaction'): fabric_pb2.Transaction.SerializeToString,
+ }
+ response_deserializers = {
+ ('protos.Peer', 'Chat'): fabric_pb2.Message.FromString,
+ ('protos.Peer', 'ProcessTransaction'): fabric_pb2.Response.FromString,
+ }
+ cardinalities = {
+ 'Chat': cardinality.Cardinality.STREAM_STREAM,
+ 'ProcessTransaction': cardinality.Cardinality.UNARY_UNARY,
+ }
+ stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+ return beta_implementations.dynamic_stub(channel, 'protos.Peer', cardinalities, options=stub_options)
+# @@protoc_insertion_point(module_scope)
diff --git a/bddtests/java_shim.feature b/bddtests/java_shim.feature
new file mode 100644
index 00000000000..929e2e15130
--- /dev/null
+++ b/bddtests/java_shim.feature
@@ -0,0 +1,49 @@
+# Test Hyperledger Peers
+#
+# Tags that can be used and will affect test internals:
+#
+# @doNotDecompose will NOT decompose the named compose_yaml after scenario ends. Useful for setting up environment and reviewing after scenario.
+#
+#  @chaincodeImagesUpToDate use this if all scenarios' chaincode images are up to date, and do NOT require building.  BE SURE!!!
+
+#@chaincodeImagesUpToDate
+Feature: SimpleSample Java example
+
+#@doNotDecompose
+# @wip
+ Scenario: java SimpleSample chaincode example single peer
+ Given we compose "docker-compose-1.yml"
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+ When I deploy lang chaincode "core/chaincode/shim/java" of "JAVA" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "300" seconds for transaction to be committed to all peers
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "2"
+
+ When I query chaincode "example2" function name "query" on "vp0":
+ |arg1|
+ | a |
+ Then I should get a JSON response with "result.message" = "{'Name':'a','Amount':'100'}"
+
+ When I invoke chaincode "example2" function name "transfer" on "vp0"
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "3"
+
+ When I query chaincode "example2" function name "query" on "vp0":
+ |arg1|
+ | a |
+ Then I should get a JSON response with "result.message" = "{'Name':'a','Amount':'90'}"
+
+ When I query chaincode "example2" function name "query" on "vp0":
+ |arg1|
+ | b |
+ Then I should get a JSON response with "result.message" = "{'Name':'b','Amount':'210'}"
diff --git a/bddtests/peer_basic.feature b/bddtests/peer_basic.feature
new file mode 100644
index 00000000000..3d77a975a93
--- /dev/null
+++ b/bddtests/peer_basic.feature
@@ -0,0 +1,1416 @@
+#
+# Test Fabric Peers
+#
+# Tags that can be used and will affect test internals:
+# @doNotDecompose will NOT decompose the named compose_yaml after scenario ends. Useful for setting up environment and reviewing after scenario.
+#    @chaincodeImagesUpToDate use this if all scenarios' chaincode images are up to date, and do NOT require building.  BE SURE!!!
+
+#@chaincodeImagesUpToDate
+Feature: Network of Peers
+ As a Fabric developer
+ I want to run a network of peers
+
+# @wip
+ Scenario: Peers list test, single peer issue #827
+ Given we compose "docker-compose-1.yml"
+ When requesting "/network/peers" from "vp0"
+ Then I should get a JSON response with array "peers" contains "1" elements
+
+# @wip
+ Scenario: Peers list test,3 peers issue #827
+ Given we compose "docker-compose-3.yml"
+ When requesting "/network/peers" from "vp0"
+ Then I should get a JSON response with array "peers" contains "3" elements
+
+# @doNotDecompose
+ @wip
+ @issue_767
+ Scenario: Range query test, single peer, issue #767
+ Given we compose "docker-compose-1.yml"
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/map" with ctor "init" to "vp0"
+ ||
+ ||
+
+ Then I should have received a chaincode name
+ Then I wait up to "60" seconds for transaction to be committed to all peers
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "2"
+
+ When I invoke chaincode "map" function name "put" on "vp0"
+ | arg1 | arg2 |
+ | key1 | value1 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "3"
+
+ When I query chaincode "map" function name "get" on "vp0":
+ | arg1|
+ | key1 |
+ Then I should get a JSON response with "result.message" = "value1"
+
+ When I invoke chaincode "map" function name "put" on "vp0"
+ | arg1 | arg2 |
+ | key2 | value2 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "4"
+
+ When I query chaincode "map" function name "keys" on "vp0":
+ ||
+ ||
+ Then I should get a JSON response with "result.message" = "["key1","key2"]"
+
+ When I invoke chaincode "map" function name "remove" on "vp0"
+ | arg1 | |
+ | key1 | |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "5"
+
+ When I query chaincode "map" function name "keys" on "vp0":
+ ||
+ ||
+ Then I should get a JSON response with "result.message" = "["key2"]"
+
+# @doNotDecompose
+ @wip
+ @issue_477
+ Scenario: chaincode shim table API, issue 477
+ Given we compose "docker-compose-1.yml"
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+ When I deploy chaincode "github.com/hyperledger/fabric/bddtests/chaincode/go/table" with ctor "init" to "vp0"
+ ||
+ ||
+ Then I should have received a chaincode name
+ Then I wait up to "60" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "2"
+
+ When I invoke chaincode "table_test" function name "insertRowTableOne" on "vp0"
+ | arg1 | arg2 | arg3 |
+ | test1| 10 | 20 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "3"
+
+ When I invoke chaincode "table_test" function name "insertRowTableOne" on "vp0"
+ | arg1 | arg2 | arg3 |
+ | test2| 10 | 20 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "4"
+
+ When I query chaincode "table_test" function name "getRowTableOne" on "vp0":
+ | arg1 |
+ | test1|
+ Then I should get a JSON response with "result.message" = "{[string:"test1" int32:10 int32:20 ]}"
+
+ When I invoke chaincode "table_test" function name "insertRowTableTwo" on "vp0"
+          | arg1 | arg2 | arg3 | arg4 |
+ | foo2 | 34 | 65 | bar8 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "5"
+
+ When I query chaincode "table_test" function name "getRowTableTwo" on "vp0":
+ | arg1 | arg2 | arg3 |
+ | foo2 | 65 | bar8 |
+ Then I should get a JSON response with "result.message" = "{[string:"foo2" int32:34 int32:65 string:"bar8" ]}"
+
+ When I invoke chaincode "table_test" function name "replaceRowTableOne" on "vp0"
+ | arg1 | arg2 | arg3 |
+ | test1| 30 | 40 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "6"
+
+ When I query chaincode "table_test" function name "getRowTableOne" on "vp0":
+ | arg1 |
+ | test1|
+ Then I should get a JSON response with "result.message" = "{[string:"test1" int32:30 int32:40 ]}"
+
+ When I invoke chaincode "table_test" function name "deleteRowTableOne" on "vp0"
+ | arg1 |
+ | test1|
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "7"
+
+ When I query chaincode "table_test" function name "getRowTableOne" on "vp0":
+ | arg1 |
+ | test1|
+ Then I should get a JSON response with "result.message" = "{[]}"
+
+ When I query chaincode "table_test" function name "getRowTableOne" on "vp0":
+ | arg1 |
+ | test2|
+ Then I should get a JSON response with "result.message" = "{[string:"test2" int32:10 int32:20 ]}"
+
+ When I invoke chaincode "table_test" function name "insertRowTableOne" on "vp0"
+ | arg1 | arg2 | arg3 |
+ | test3| 10 | 20 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "8"
+
+ When I invoke chaincode "table_test" function name "insertRowTableOne" on "vp0"
+ | arg1 | arg2 | arg3 |
+ | test4| 10 | 20 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "9"
+
+ When I invoke chaincode "table_test" function name "insertRowTableOne" on "vp0"
+ | arg1 | arg2 | arg3 |
+ | test5| 10 | 20 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "10"
+
+ When I query chaincode "table_test" function name "getRowTableOne" on "vp0":
+ | arg1 |
+ | test3|
+ Then I should get a JSON response with "result.message" = "{[string:"test3" int32:10 int32:20 ]}"
+
+ When I query chaincode "table_test" function name "getRowTableOne" on "vp0":
+ | arg1 |
+ | test4|
+ Then I should get a JSON response with "result.message" = "{[string:"test4" int32:10 int32:20 ]}"
+
+ When I query chaincode "table_test" function name "getRowTableOne" on "vp0":
+ | arg1 |
+ | test5|
+ Then I should get a JSON response with "result.message" = "{[string:"test5" int32:10 int32:20 ]}"
+
+ When I invoke chaincode "table_test" function name "insertRowTableTwo" on "vp0"
+          | arg1 | arg2 | arg3 | arg4 |
+ | foo2 | 35 | 65 | bar10 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "11"
+
+ When I invoke chaincode "table_test" function name "insertRowTableTwo" on "vp0"
+          | arg1 | arg2 | arg3 | arg4 |
+ | foo2 | 36 | 65 | bar11 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "12"
+
+ When I invoke chaincode "table_test" function name "insertRowTableTwo" on "vp0"
+          | arg1 | arg2 | arg3 | arg4 |
+ | foo2 | 37 | 65 | bar12 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "13"
+
+ When I invoke chaincode "table_test" function name "insertRowTableTwo" on "vp0"
+          | arg1 | arg2 | arg3 | arg4 |
+ | foo2 | 38 | 66 | bar10 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "14"
+
+ When I query chaincode "table_test" function name "getRowsTableTwo" on "vp0":
+ | arg1 | arg2 |
+ | foo2 | 65 |
+ Then I should get a JSON response with "result.message" = "[{"columns":[{"Value":{"String_":"foo2"}},{"Value":{"Int32":37}},{"Value":{"Int32":65}},{"Value":{"String_":"bar12"}}]},{"columns":[{"Value":{"String_":"foo2"}},{"Value":{"Int32":34}},{"Value":{"Int32":65}},{"Value":{"String_":"bar8"}}]},{"columns":[{"Value":{"String_":"foo2"}},{"Value":{"Int32":36}},{"Value":{"Int32":65}},{"Value":{"String_":"bar11"}}]},{"columns":[{"Value":{"String_":"foo2"}},{"Value":{"Int32":35}},{"Value":{"Int32":65}},{"Value":{"String_":"bar10"}}]}]"
+
+ When I query chaincode "table_test" function name "getRowsTableTwo" on "vp0":
+ | arg1 | arg2 |
+ | foo2 | 66 |
+ Then I should get a JSON response with "result.message" = "[{"columns":[{"Value":{"String_":"foo2"}},{"Value":{"Int32":38}},{"Value":{"Int32":66}},{"Value":{"String_":"bar10"}}]}]"
+
+ When I query chaincode "table_test" function name "getRowsTableTwo" on "vp0":
+ | arg1 |
+ | foo2 |
+ Then I should get a JSON response with "result.message" = "[{"columns":[{"Value":{"String_":"foo2"}},{"Value":{"Int32":37}},{"Value":{"Int32":65}},{"Value":{"String_":"bar12"}}]},{"columns":[{"Value":{"String_":"foo2"}},{"Value":{"Int32":34}},{"Value":{"Int32":65}},{"Value":{"String_":"bar8"}}]},{"columns":[{"Value":{"String_":"foo2"}},{"Value":{"Int32":36}},{"Value":{"Int32":65}},{"Value":{"String_":"bar11"}}]},{"columns":[{"Value":{"String_":"foo2"}},{"Value":{"Int32":38}},{"Value":{"Int32":66}},{"Value":{"String_":"bar10"}}]},{"columns":[{"Value":{"String_":"foo2"}},{"Value":{"Int32":35}},{"Value":{"Int32":65}},{"Value":{"String_":"bar10"}}]}]"
+
+ When I invoke chaincode "table_test" function name "deleteAndRecreateTableOne" on "vp0"
+ ||
+ ||
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "15"
+
+ When I query chaincode "table_test" function name "getRowTableOne" on "vp0":
+ | arg1 |
+ | test3|
+ Then I should get a JSON response with "result.message" = "{[]}"
+
+ When I query chaincode "table_test" function name "getRowTableOne" on "vp0":
+ | arg1 |
+ | test4|
+ Then I should get a JSON response with "result.message" = "{[]}"
+
+ When I query chaincode "table_test" function name "getRowTableOne" on "vp0":
+ | arg1 |
+ | test5|
+ Then I should get a JSON response with "result.message" = "{[]}"
+
+ When I query chaincode "table_test" function name "getRowTableOne" on "vp0":
+ | arg1 |
+ | test2|
+ Then I should get a JSON response with "result.message" = "{[]}"
+
+ When I query chaincode "table_test" function name "getRowTableTwo" on "vp0":
+ | arg1 | arg2 | arg3 |
+ | foo2 | 65 | bar8 |
+ Then I should get a JSON response with "result.message" = "{[string:"foo2" int32:34 int32:65 string:"bar8" ]}"
+
+ When I invoke chaincode "table_test" function name "insertRowTableThree" on "vp0"
+ | arg1 | arg2 | arg3 | arg4 | arg5 | arg6 | arg7 |
+ | foo2 | -38 | -66 | 77 | 88 | hello| true |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "16"
+
+ When I query chaincode "table_test" function name "getRowTableThree" on "vp0":
+ | arg1 |
+ | foo2 |
+ Then I should get a JSON response with "result.message" = "{[string:"foo2" int32:-38 int64:-66 uint32:77 uint64:88 bytes:"hello" bool:true ]}"
+
+ When I invoke chaincode "table_test" function name "insertRowTableFour" on "vp0"
+ | arg1 |
+ | foobar |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "17"
+
+ When I query chaincode "table_test" function name "getRowTableFour" on "vp0":
+ | arg1 |
+ | foobar |
+ Then I should get a JSON response with "result.message" = "{[string:"foobar" ]}"
+
+ When I query chaincode "table_test" function name "getRowsTableFour" on "vp0":
+ | arg1 |
+ | foobar |
+ Then I should get a JSON response with "result.message" = "[{"columns":[{"Value":{"String_":"foobar"}}]}]"
+
+#@doNotDecompose
+# @wip
+ Scenario: chaincode example 01 single peer erroneous TX
+ Given we compose "docker-compose-1.yml"
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example01" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "60" seconds for transaction to be committed to all peers
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "2"
+
+ When I invoke chaincode "example1" function name "invoke" on "vp0"
+ |arg1|
+ | 1 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "3"
+ When requesting "/chain/blocks/2" from "vp0"
+ Then I should get a JSON response containing "transactions" attribute
+
+ When I invoke chaincode "example1" function name "invoke" on "vp0"
+ |arg1|
+ | a |
+ Then I should have received a transactionID
+ Then I wait "10" seconds
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "4"
+ When requesting "/chain/blocks/3" from "vp0"
+ Then I should get a JSON response containing no "transactions" attribute
+
+# @doNotDecompose
+# @wip
+# Arg[0] = a, base64 = 'YQ=='
+# sha256 = 'ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb'
+ Scenario: chaincode map single peer content generated ID
+ Given we compose "docker-compose-1.yml"
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/map" with ctor "init" to "vp0"
+ ||
+ ||
+ Then I should have received a chaincode name
+ Then I wait up to "60" seconds for transaction to be committed to all peers
+
+ When I invoke chaincode "map" function name "put" on "vp0" with "sha256base64"
+ | arg1 |arg2|
+ | YQ== | 10 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+ Then I check the transaction ID if it is "ca978112-ca1b-bdca-fac2-31b39a23dc4d"
+
+ Scenario: chaincode example 01 single peer rejection message
+ Given we compose "docker-compose-1-exp.yml"
+ Given I start a listener
+ Then I wait "5" seconds
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example01" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "60" seconds for transaction to be committed to all peers
+
+ When I invoke chaincode "example1" function name "invoke" on "vp0"
+ |arg1|
+ | a |
+ Then I should have received a transactionID
+ Then I wait "10" seconds
+
+ Then I should get a rejection message in the listener after stopping it
+
+# @doNotDecompose
+# @wip
+ Scenario: chaincode example 02 single peer
+ Given we compose "docker-compose-1.yml"
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "60" seconds for transaction to be committed to all peers
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "2"
+
+ When I query chaincode "example2" function name "query" on "vp0":
+ |arg1|
+ | a |
+ Then I should get a JSON response with "result.message" = "100"
+
+
+ When I invoke chaincode "example2" function name "invoke" on "vp0"
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "3"
+
+ When I query chaincode "example2" function name "query" on "vp0":
+ |arg1|
+ | a |
+ Then I should get a JSON response with "result.message" = "90"
+
+ When I query chaincode "example2" function name "query" on "vp0":
+ |arg1|
+ | b |
+ Then I should get a JSON response with "result.message" = "210"
+
+# @doNotDecompose
+# @wip
+ Scenario: chaincode example02 with 5 peers, issue #520
+ Given we compose "docker-compose-5.yml"
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "60" seconds for transaction to be committed to all peers
+
+ When I query chaincode "example2" function name "query" on all peers:
+ |arg1|
+ | a |
+ Then I should get a JSON response from all peers with "result.message" = "100"
+
+ When I invoke chaincode "example2" function name "invoke" on "vp0"
+ |arg1|arg2|arg3|
+ | a | b | 20 |
+ Then I should have received a transactionID
+ Then I wait up to "20" seconds for transaction to be committed to all peers
+
+ When I query chaincode "example2" function name "query" on all peers:
+ |arg1|
+ | a |
+ Then I should get a JSON response from all peers with "result.message" = "80"
+
+
+# @doNotDecompose
+# @wip
+ @issue_567
+ Scenario Outline: chaincode example02 with 4 peers and 1 membersrvc, issue #567
+
+ Given we compose ""
+ And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
+ | vp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+ And I wait "2" seconds
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "100"
+ | vp0 | vp1 | vp2 | vp3 |
+
+ When I invoke chaincode "example2" function name "invoke" on "vp0"
+ |arg1|arg2|arg3|
+ | a | b | 20 |
+ Then I should have received a transactionID
+ Then I wait up to "10" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "80"
+ | vp0 | vp1 | vp2 | vp3 |
+
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-noops.yml | 60 |
+ | docker-compose-4-consensus-batch.yml | 60 |
+
+
+ #@doNotDecompose
+ #@wip
+ @issue_680
+ Scenario Outline: chaincode example02 with 4 peers and 1 membersrvc, issue #680 (State transfer)
+
+ Given we compose ""
+ And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
+ | vp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+
+ # Deploy
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+
+ # Build up a sizable blockchain, that vp3 will need to validate at startup
+ When I invoke chaincode "example2" function name "invoke" on "vp0" "30" times
+ |arg1|arg2|arg3|
+ | b | a | 1 |
+ Then I should have received a transactionID
+ Then I wait up to "120" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "130"
+ | vp0 | vp1 | vp2 | vp3 |
+
+ # STOPPING vp3!!!!!!!!!!!!!!!!!!!!!!!!!!
+ Given I stop peers:
+ | vp3 |
+
+ # Invoke a transaction to get vp3 out of sync
+ When I invoke chaincode "example2" function name "invoke" on "vp0"
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I should have received a transactionID
+ Then I wait up to "120" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 |
+ Then I should get a JSON response from peers with "result.message" = "120"
+ | vp0 | vp1 | vp2 |
+
+ # Now start vp3 again
+ Given I start peers:
+ | vp3 |
+ And I wait "15" seconds
+
+ # Invoke 10 more txs, this will trigger a state transfer, set a target, and execute new outstanding transactions
+ When I invoke chaincode "example2" function name "invoke" on "vp0" "10" times
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I should have received a transactionID
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ # wait a bit longer and let state transfer finish
+ Then I wait "60" seconds
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "20"
+ | vp0 | vp1 | vp2 | vp3 |
+
+
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-batch.yml | 60 |
+
+
+# @doNotDecompose
+ @issue_724
+ Scenario Outline: chaincode example02 with 4 peers and 1 membersrvc, issue #724
+
+ Given we compose ""
+ And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
+ | vp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+ And I wait "2" seconds
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "100"
+ | vp0 | vp1 | vp2 | vp3 |
+
+ Given I stop peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ And I wait "1" seconds
+
+ Given I start peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ And I wait "5" seconds
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "100"
+ | vp3 |
+
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-noops.yml | 60 |
+
+
+# @doNotDecompose
+# @wip
+ Scenario: basic startup of 3 validating peers
+ Given we compose "docker-compose-3.yml"
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ @TLS
+# @doNotDecompose
+ Scenario: basic startup of 2 validating peers using TLS
+ Given we compose "docker-compose-2-tls-basic.yml"
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+
+#@doNotDecompose
+#@wip
+#@skip
+ Scenario Outline: 4 peers and 1 membersrvc, consensus still works if one backup replica fails
+
+ Given we compose ""
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+ And I register with CA supplying username "test_user0" and secret "MS9qrN8hFjlE" on peers:
+ | vp0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ # Deploy
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ # get things started. All peers up and executing Txs
+ When I invoke chaincode "example2" function name "invoke" on "vp0" "5" times
+ |arg1|arg2|arg3|
+ | a | b | 1 |
+ Then I should have received a transactionID
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "95"
+ | vp0 | vp1 | vp2 | vp3 |
+
+ # STOP vp2
+ Given I stop peers:
+ | vp2 |
+ And I wait "3" seconds
+
+ # continue invoking Txs
+ When I invoke chaincode "example2" function name "invoke" on "vp0" "5" times
+ |arg1|arg2|arg3|
+ | a | b | 1 |
+ Then I should have received a transactionID
+ Then I wait up to "10" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp3 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "90"
+ | vp0 | vp1 | vp3 |
+
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-batch.yml | 60 |
+
+#@doNotDecompose
+#@wip
+#@skip
+ Scenario Outline: 4 peers and 1 membersrvc, consensus fails if 2 backup replicas fail
+
+ Given we compose ""
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+ And I register with CA supplying username "test_user0" and secret "MS9qrN8hFjlE" on peers:
+ | vp0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ # Deploy
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ # get things started. All peers up and executing Txs
+ When I invoke chaincode "example2" function name "invoke" on "vp0" "5" times
+ |arg1|arg2|arg3|
+ | a | b | 1 |
+ Then I should have received a transactionID
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "95"
+ | vp0 | vp1 | vp2 | vp3 |
+
+    # STOP vp1 and vp2
+ Given I stop peers:
+ | vp1 | vp2 |
+ And I wait "3" seconds
+
+ # continue invoking Txs
+ When I invoke chaincode "example2" function name "invoke" on "vp0" "5" times
+ |arg1|arg2|arg3|
+ | a | b | 1 |
+ And I wait "5" seconds
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "95"
+ | vp0 | vp3 |
+
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-batch.yml | 60 |
+
+ #@doNotDecompose
+ #@wip
+ #@skip
+ Scenario Outline: 4 peers and 1 membersrvc, consensus still works if 1 peer (vp3) is byzantine
+
+ Given we compose ""
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+ And I register with CA supplying username "test_user0" and secret "MS9qrN8hFjlE" on peers:
+ | vp0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ # Deploy
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+
+ When I invoke chaincode "example2" function name "invoke" on "vp0" "50" times
+ |arg1|arg2|arg3|
+ | a | b | 1 |
+ Then I should have received a transactionID
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 |
+ Then I should get a JSON response from peers with "result.message" = "50"
+ | vp0 | vp1 | vp2 |
+
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-batch.yml docker-compose-4-consensus-vp3-byzantine.yml | 60 |
+
+
+ #@doNotDecompose
+ @issue_1182
+ Scenario Outline: chaincode example02 with 4 peers,1 membersrvc, and 1 non-validating peer.
+
+ Given we compose ""
+ And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
+ | nvp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+# Known issue: blocks are NOT yet synced to the non-validating peer at this point.
+# When requesting "/chain" from "nvp0"
+# Then I should get a JSON response with "height" = "1"
+
+
+ # Deploy
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "nvp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-batch.yml docker-compose-4-consensus-nvp0.yml | 60 |
+
+ #@doNotDecompose
+ @issue_1000
+ Scenario Outline: chaincode example02 with 4 peers and 1 membersrvc, test crash fault
+
+ Given we compose ""
+ And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
+ | vp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ # Deploy
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ # Build up a sizable blockchain, to advance the sequence number
+ When I invoke chaincode "example2" function name "invoke" on "vp0" "30" times
+ |arg1|arg2|arg3|
+ | b | a | 1 |
+ Then I should have received a transactionID
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "130"
+ | vp0 | vp1 | vp2 | vp3 |
+
+ # Stop vp1, vp2, vp3
+ Given I stop peers:
+ | vp1 | vp2 | vp3 |
+
+ # Now start vp1, vp2 again, hopefully retaining pbft state
+ Given I start peers:
+ | vp1 | vp2 |
+ And I wait "15" seconds
+
+ # Invoke 1 more tx, if the crash recovery worked, it will commit, otherwise, it will not
+ When I invoke chaincode "example2" function name "invoke" on "vp0"
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I should have received a transactionID
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 |
+ Then I should get a JSON response from peers with "result.message" = "120"
+ | vp0 | vp1 | vp2 |
+
+
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-batch.yml | 60 |
+
+
+
+ @issue_1091
+ Scenario Outline: chaincode example02 with 4 peers and 1 membersrvc, issue #1019 (out of date peer)
+
+ Given we compose ""
+ And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
+ | vp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+
+ # Deploy
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+
+ # STOPPING vp3!!!!!!!!!!!!!!!!!!!!!!!!!!
+ Given I stop peers:
+ | vp3 |
+
+ # Execute one request to get vp3 out of sync
+ When I invoke chaincode "example2" function name "invoke" on "vp0"
+ |arg1|arg2|arg3|
+ | b | a | 1 |
+ Then I should have received a transactionID
+ Then I wait up to "120" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 |
+ Then I should get a JSON response from peers with "result.message" = "101"
+ | vp0 | vp1 | vp2 |
+
+ # Now start vp3 again
+ Given I start peers:
+ | vp3 |
+ And I wait "15" seconds
+
+    # Invoke 8 more txs; this will trigger a state transfer on vp3, but it cannot complete
+ When I invoke chaincode "example2" function name "invoke" on "vp0" "8" times
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I should have received a transactionID
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+ # wait a bit to make sure the state is invalid on vp3
+ Then I wait "20" seconds
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 |
+ Then I should get a JSON response from peers with "result.message" = "21"
+ | vp0 | vp1 | vp2 |
+ When I unconditionally query chaincode "example2" function name "query" with value "a" on peers:
+ | vp3 |
+ Then I should get a JSON response from peers with "error.data" = "Error when querying chaincode: Error: state may be inconsistent, cannot query"
+ | vp3 |
+
+
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-batch.yml | 60 |
+
+# @doNotDecompose
+# @wip
+ Scenario: chaincode example02 with 4 peers, one paused, issue #1056
+ Given we compose "docker-compose-4-consensus-batch.yml"
+ And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
+ | vp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ Given I pause peers:
+ | vp3 |
+
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 |
+ Then I should get a JSON response from peers with "result.message" = "100"
+ | vp0 | vp1 | vp2 |
+
+ When I invoke chaincode "example2" function name "invoke" on "vp0" "20" times
+ |arg1|arg2|arg3|
+ | a | b | 1 |
+ Then I should have received a transactionID
+ Then I wait up to "20" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 |
+ Then I should get a JSON response from peers with "result.message" = "80"
+ | vp0 | vp1 | vp2 |
+
+#@doNotDecompose
+# @wip
+@issue_1873
+ Scenario Outline: 4 peers and 1 membersrvc, consensus works if vp0 is stopped TTT3
+ Given we compose ""
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+ And I register with CA supplying username "test_user0" and secret "MS9qrN8hFjlE" on peers:
+ | vp0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ # Deploy
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "2"
+
+ # STOP vp0
+ Given I stop peers:
+ | vp0 |
+ And I wait "5" seconds
+
+ And I register with CA supplying username "test_user1" and secret "jGlNl6ImkuDo" on peers:
+ | vp1 |
+
+ When I invoke chaincode "example2" function name "invoke" on "vp1" "5" times
+ |arg1|arg2|arg3|
+ | a | b | 1 |
+ Then I should have received a transactionID
+ #Then I wait up to "120" seconds for transaction to be committed to peers:
+ # | vp0 | vp1 | vp2 | vp3 |
+ And I wait "120" seconds
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp1 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "95"
+ | vp1 | vp2 | vp3 |
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-batch.yml | 60 |
+
+ #@doNotDecompose
+ #@wip
+ @issue_1851
+ Scenario Outline: verify reconnect of disconnected peer, issue #1851
+
+ Given we compose ""
+ And I wait "2" seconds
+
+ When requesting "/network/peers" from "vp0"
+ Then I should get a JSON response with array "peers" contains "2" elements
+
+ Given I stop peers:
+ | vp0 |
+
+ When requesting "/network/peers" from "vp1"
+ Then I should get a JSON response with array "peers" contains "1" elements
+
+ Given I start peers:
+ | vp0 |
+ And I wait "10" seconds
+
+ When requesting "/network/peers" from "vp1"
+ Then I should get a JSON response with array "peers" contains "2" elements
+
+ Examples: Composition options
+ | ComposeFile |
+ | docker-compose-2.yml |
+
+
+@issue_1942
+#@doNotDecompose
+Scenario: chaincode example02 with 4 peers, stop and start alternates, reverse
+ Given we compose "docker-compose-4-consensus-batch.yml"
+ And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
+ | vp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 1000 | b | 0 |
+ Then I should have received a chaincode name
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "1000"
+ | vp0 | vp1 | vp2 | vp3 |
+
+ Given I stop peers:
+ | vp2 |
+ And I register with CA supplying username "test_user3" and secret "vWdLCE00vJy0" on peers:
+ | vp3 |
+
+ When I invoke chaincode "example2" function name "invoke" on "vp3" "3" times
+ |arg1|arg2|arg3|
+ | a | b | 1 |
+ Then I should have received a transactionID
+ Then I wait up to "180" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp3 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "997"
+ | vp0 | vp1 | vp3 |
+
+ Given I start peers:
+ | vp2 |
+ And I wait "30" seconds
+
+ Given I stop peers:
+ | vp1 |
+ When I invoke chaincode "example2" function name "invoke" on "vp3" "20" times
+ |arg1|arg2|arg3|
+ | a | b | 1 |
+ Then I should have received a transactionID
+ Then I wait up to "300" seconds for transaction to be committed to peers:
+ | vp0 | vp2 | vp3 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "977"
+ | vp0 | vp2 | vp3 |
+
+@issue_1874a
+#@doNotDecompose
+Scenario: chaincode example02 with 4 peers, two stopped
+ Given we compose "docker-compose-4-consensus-batch.yml"
+ And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
+ | vp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "100"
+ | vp0 | vp1 | vp2 | vp3 |
+
+ Given I stop peers:
+ | vp2 | vp3 |
+
+ When I invoke chaincode "example2" function name "invoke" on "vp0"
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I should have received a transactionID
+
+ Given I start peers:
+ | vp3 |
+ And I wait "15" seconds
+
+ When I invoke chaincode "example2" function name "invoke" on "vp0" "9" times
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I should have received a transactionID
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp3 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "0"
+ | vp0 | vp1 | vp3 |
+
+@issue_1874b
+#@doNotDecompose
+Scenario: chaincode example02 with 4 peers, two stopped, bring back vp0
+ Given we compose "docker-compose-4-consensus-batch.yml"
+ And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
+ | vp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "100"
+ | vp0 | vp1 | vp2 | vp3 |
+
+ Given I stop peers:
+ | vp0 |
+
+ And I register with CA supplying username "test_user1" and secret "jGlNl6ImkuDo" on peers:
+ | vp1 |
+
+ When I invoke chaincode "example2" function name "invoke" on "vp1"
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I should have received a transactionID
+
+ Given I stop peers:
+ | vp3 |
+
+ When I invoke chaincode "example2" function name "invoke" on "vp1"
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I should have received a transactionID
+
+ Given I start peers:
+ | vp0 |
+ And I wait "15" seconds
+
+ When I invoke chaincode "example2" function name "invoke" on "vp1" "8" times
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I should have received a transactionID
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 |
+ Then I should get a JSON response from peers with "result.message" = "0"
+ | vp0 | vp1 | vp2 |
+
+@issue_1874c
+#@doNotDecompose
+ Scenario: chaincode example02 with 4 peers, two stopped, bring back both
+ Given we compose "docker-compose-4-consensus-batch.yml"
+ And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
+ | vp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ Then I should have received a chaincode name
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "100"
+ | vp0 | vp1 | vp2 | vp3 |
+
+ Given I stop peers:
+ | vp1 | vp2 |
+
+ When I invoke chaincode "example2" function name "invoke" on "vp0" "1" times
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I should have received a transactionID
+
+ Given I start peers:
+ | vp1 | vp2 |
+ And I wait "15" seconds
+
+ When I invoke chaincode "example2" function name "invoke" on "vp0" "8" times
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I should have received a transactionID
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ Then I wait "30" seconds
+ # For the view to change to "vp3" or "vp1"
+
+ When I query chaincode "example2" function name "query" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+ Then I should get a JSON response from peers with "result.message" = "10"
+ | vp0 | vp1 | vp2 | vp3 |
+
+ @issue_2116
+ #@doNotDecompose
+  Scenario Outline: chaincode authorizable_counter with 4 peers and 1 membersrvc, attribute-based access control
+ Given we compose ""
+ And I register with CA supplying username "diego" and secret "DRJ23pEQl16a" on peers:
+ | vp0 |
+ And I use the following credentials for querying peers:
+ | peer | username | secret |
+ | vp0 | test_user0 | MS9qrN8hFjlE |
+ | vp1 | test_user1 | jGlNl6ImkuDo |
+ | vp2 | test_user2 | zMflqOKezFiA |
+ | vp3 | test_user3 | vWdLCE00vJy0 |
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+
+ When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/authorizable_counter" with ctor "init" to "vp0"
+ | arg1 |
+ | 0 |
+ Then I should have received a chaincode name
+ Then I wait up to "60" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 |
+
+ When I query chaincode "authorizable_counter" function name "read" on "vp0":
+ |arg1|
+ | a |
+ Then I should get a JSON response with "result.message" = "0"
+
+ When I invoke chaincode "authorizable_counter" function name "increment" with attributes "position" on "vp0"
+ |arg1|
+ | a |
+ Then I should have received a transactionID
+
+ When I invoke chaincode "authorizable_counter" function name "increment" on "vp0" "8" times
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I should have received a transactionID
+ Then I wait up to "30" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ When I query chaincode "authorizable_counter" function name "read" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ Then I should get a JSON response from peers with "result.message" = "1"
+ | vp0 | vp1 | vp2 | vp3 |
+
+ When I invoke chaincode "authorizable_counter" function name "increment" with attributes "company" on "vp0"
+ |arg1|
+ | a |
+
+ When I invoke chaincode "authorizable_counter" function name "increment" with attributes "company, position, age" on "vp0"
+ |arg1|
+ | a |
+
+ Then I wait up to "15" seconds for transaction to be committed to peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ When I query chaincode "authorizable_counter" function name "read" with value "a" on peers:
+ | vp0 | vp1 | vp2 | vp3 |
+
+ Then I should get a JSON response from peers with "result.message" = "2"
+ | vp0 | vp1 | vp2 | vp3 |
+
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-batch.yml docker-membersrvc-attributes-enabled.yml | 120 |
+ | docker-compose-4-consensus-batch.yml docker-membersrvc-attributes-enabled.yml docker-membersrvc-attributes-encryption-enabled.yml | 120 |
+
+# noop
+# @doNotDecompose
+ Scenario: noop chaincode test
+ Given we compose "docker-compose-1.yml"
+ When I invoke master chaincode "noop" function name "execute" on "vp0"
+ |arg1|
+ | aa |
+ Then I should have received a transactionID
diff --git a/bddtests/peer_logging.feature b/bddtests/peer_logging.feature
new file mode 100755
index 00000000000..58ba346b2cb
--- /dev/null
+++ b/bddtests/peer_logging.feature
@@ -0,0 +1,46 @@
+#
+# Test Logging Features of Peers
+#
+# Tags that can be used and will affect test internals:
+# @doNotDecompose will NOT decompose the named compose_yaml after scenario ends. Useful for setting up environment and reviewing after scenario.
+# @chaincodeImagesUpToDate use this if all scenarios' chaincode images are up to date and do NOT require building. BE SURE!!!
+
+Feature: Peer Logging
+ As a Fabric Developer
+ I want to verify my Peers log correctly
+
+ Scenario: Invoke is attempted after deploy in Dev Mode
+ Given we compose "docker-compose-1-devmode.yml"
+ When I deploy chaincode with name "testCC" and with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ And I invoke chaincode "testCC" function name "invoke" on "vp0"
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then ensure after 2 seconds there are no errors in the logs for peer vp0
+
+ Scenario: Query is attempted after deploy in Dev Mode
+ Given we compose "docker-compose-1-devmode.yml"
+ When I deploy chaincode with name "testCC" and with ctor "init" to "vp0"
+ | arg1 | arg2 | arg3 | arg4 |
+ | a | 100 | b | 200 |
+ And I query chaincode "testCC" function name "query" on "vp0":
+ |arg1|
+ | a |
+ Then ensure after 2 seconds there are no errors in the logs for peer vp0
+
+ Scenario: Invoke is attempted before deploy in Dev Mode
+ Given we compose "docker-compose-1-devmode.yml"
+ When I mock deploy chaincode with name "testCC"
+ And I invoke chaincode "testCC" function name "invoke" on "vp0"
+ |arg1|arg2|arg3|
+ | a | b | 10 |
+ Then I wait up to 5 seconds for an error in the logs for peer vp0
+
+ Scenario: Query is attempted before deploy in Dev Mode
+ Given we compose "docker-compose-1-devmode.yml"
+ When I mock deploy chaincode with name "testCC"
+ And I query chaincode "testCC" function name "query" on "vp0":
+ |arg1|
+ | a |
+ Then I wait up to 5 seconds for an error in the logs for peer vp0
\ No newline at end of file
diff --git a/bddtests/sdk.feature b/bddtests/sdk.feature
new file mode 100644
index 00000000000..230d30459e8
--- /dev/null
+++ b/bddtests/sdk.feature
@@ -0,0 +1,27 @@
+#
+# Test the Hyperledger SDK
+#
+# Tags that can be used and will affect test internals:
+#
+# @doNotDecompose will NOT decompose the named compose_yaml after scenario ends. Useful for setting up environment and reviewing after scenario.
+#
+
+Feature: Node SDK
+ As a HyperLedger developer
+ I want to have a single test driver for the various Hyperledger SDKs
+
+
+ @doNotDecompose
+ @sdk
+ Scenario Outline: test initial sdk setup
+
+ Given we compose ""
+ And I wait "5" seconds
+ And I register thru the sample SDK app supplying username "WebAppAdmin" and secret "DJY27pEnl16d" on "sampleApp0"
+ Then I should get a JSON response with "foo" = "bar"
+
+
+ Examples: Consensus Options
+ | ComposeFile | WaitTime |
+ | docker-compose-4-consensus-batch.yml docker-compose-sdk-node.yml | 60 |
+ #| docker-compose-4-consensus-batch.yml docker-compose-sdk-java.yml | 60 |
diff --git a/bddtests/server_admin_pb2.py b/bddtests/server_admin_pb2.py
new file mode 100644
index 00000000000..d042955bf5b
--- /dev/null
+++ b/bddtests/server_admin_pb2.py
@@ -0,0 +1,195 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: server_admin.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='server_admin.proto',
+ package='protos',
+ syntax='proto3',
+ serialized_pb=_b('\n\x12server_admin.proto\x12\x06protos\x1a\x1bgoogle/protobuf/empty.proto\"\x9a\x01\n\x0cServerStatus\x12/\n\x06status\x18\x01 \x01(\x0e\x32\x1f.protos.ServerStatus.StatusCode\"Y\n\nStatusCode\x12\r\n\tUNDEFINED\x10\x00\x12\x0b\n\x07STARTED\x10\x01\x12\x0b\n\x07STOPPED\x10\x02\x12\n\n\x06PAUSED\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x12\x0b\n\x07UNKNOWN\x10\x05\x32\xc1\x01\n\x05\x41\x64min\x12;\n\tGetStatus\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12=\n\x0bStartServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12<\n\nStopServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x62\x06proto3')
+ ,
+ dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+
+_SERVERSTATUS_STATUSCODE = _descriptor.EnumDescriptor(
+ name='StatusCode',
+ full_name='protos.ServerStatus.StatusCode',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='UNDEFINED', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='STARTED', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='STOPPED', index=2, number=2,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='PAUSED', index=3, number=3,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='ERROR', index=4, number=4,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='UNKNOWN', index=5, number=5,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=125,
+ serialized_end=214,
+)
+_sym_db.RegisterEnumDescriptor(_SERVERSTATUS_STATUSCODE)
+
+
+_SERVERSTATUS = _descriptor.Descriptor(
+ name='ServerStatus',
+ full_name='protos.ServerStatus',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='status', full_name='protos.ServerStatus.status', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ _SERVERSTATUS_STATUSCODE,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=60,
+ serialized_end=214,
+)
+
+_SERVERSTATUS.fields_by_name['status'].enum_type = _SERVERSTATUS_STATUSCODE
+_SERVERSTATUS_STATUSCODE.containing_type = _SERVERSTATUS
+DESCRIPTOR.message_types_by_name['ServerStatus'] = _SERVERSTATUS
+
+ServerStatus = _reflection.GeneratedProtocolMessageType('ServerStatus', (_message.Message,), dict(
+ DESCRIPTOR = _SERVERSTATUS,
+ __module__ = 'server_admin_pb2'
+ # @@protoc_insertion_point(class_scope:protos.ServerStatus)
+ ))
+_sym_db.RegisterMessage(ServerStatus)
+
+
+import abc
+import six
+from grpc.beta import implementations as beta_implementations
+from grpc.beta import interfaces as beta_interfaces
+from grpc.framework.common import cardinality
+from grpc.framework.interfaces.face import utilities as face_utilities
+
+class BetaAdminServicer(object):
+ """Interface exported by the server.
+ """
+ def GetStatus(self, request, context):
+ """Return the serve status.
+ """
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def StartServer(self, request, context):
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+ def StopServer(self, request, context):
+ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+
+class BetaAdminStub(object):
+ """Interface exported by the server.
+ """
+ def GetStatus(self, request, timeout):
+ """Return the serve status.
+ """
+ raise NotImplementedError()
+ GetStatus.future = None
+ def StartServer(self, request, timeout):
+ raise NotImplementedError()
+ StartServer.future = None
+ def StopServer(self, request, timeout):
+ raise NotImplementedError()
+ StopServer.future = None
+
+def beta_create_Admin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+ import google.protobuf.empty_pb2
+ import server_admin_pb2
+ import google.protobuf.empty_pb2
+ import server_admin_pb2
+ import google.protobuf.empty_pb2
+ import server_admin_pb2
+ request_deserializers = {
+ ('protos.Admin', 'GetStatus'): google.protobuf.empty_pb2.Empty.FromString,
+ ('protos.Admin', 'StartServer'): google.protobuf.empty_pb2.Empty.FromString,
+ ('protos.Admin', 'StopServer'): google.protobuf.empty_pb2.Empty.FromString,
+ }
+ response_serializers = {
+ ('protos.Admin', 'GetStatus'): server_admin_pb2.ServerStatus.SerializeToString,
+ ('protos.Admin', 'StartServer'): server_admin_pb2.ServerStatus.SerializeToString,
+ ('protos.Admin', 'StopServer'): server_admin_pb2.ServerStatus.SerializeToString,
+ }
+ method_implementations = {
+ ('protos.Admin', 'GetStatus'): face_utilities.unary_unary_inline(servicer.GetStatus),
+ ('protos.Admin', 'StartServer'): face_utilities.unary_unary_inline(servicer.StartServer),
+ ('protos.Admin', 'StopServer'): face_utilities.unary_unary_inline(servicer.StopServer),
+ }
+ server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+ return beta_implementations.server(method_implementations, options=server_options)
+
+def beta_create_Admin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+ import google.protobuf.empty_pb2
+ import server_admin_pb2
+ import google.protobuf.empty_pb2
+ import server_admin_pb2
+ import google.protobuf.empty_pb2
+ import server_admin_pb2
+ request_serializers = {
+ ('protos.Admin', 'GetStatus'): google.protobuf.empty_pb2.Empty.SerializeToString,
+ ('protos.Admin', 'StartServer'): google.protobuf.empty_pb2.Empty.SerializeToString,
+ ('protos.Admin', 'StopServer'): google.protobuf.empty_pb2.Empty.SerializeToString,
+ }
+ response_deserializers = {
+ ('protos.Admin', 'GetStatus'): server_admin_pb2.ServerStatus.FromString,
+ ('protos.Admin', 'StartServer'): server_admin_pb2.ServerStatus.FromString,
+ ('protos.Admin', 'StopServer'): server_admin_pb2.ServerStatus.FromString,
+ }
+ cardinalities = {
+ 'GetStatus': cardinality.Cardinality.UNARY_UNARY,
+ 'StartServer': cardinality.Cardinality.UNARY_UNARY,
+ 'StopServer': cardinality.Cardinality.UNARY_UNARY,
+ }
+ stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+ return beta_implementations.dynamic_stub(channel, 'protos.Admin', cardinalities, options=stub_options)
+# @@protoc_insertion_point(module_scope)
diff --git a/bddtests/steps/__init__.py b/bddtests/steps/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/bddtests/steps/bdd_grpc_util.py b/bddtests/steps/bdd_grpc_util.py
new file mode 100644
index 00000000000..8c621ffee9c
--- /dev/null
+++ b/bddtests/steps/bdd_grpc_util.py
@@ -0,0 +1,143 @@
+
+# Copyright IBM Corp. 2016 All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import re
+import subprocess
+import devops_pb2
+import fabric_pb2
+import chaincode_pb2
+
+import bdd_test_util
+
+from grpc.beta import implementations
+
+def getSecretForUserRegistration(userRegistration):
+ return devops_pb2.Secret(enrollId=userRegistration.secretMsg['enrollId'],enrollSecret=userRegistration.secretMsg['enrollSecret'])
+
+def getTxResult(context, enrollId):
+    '''Returns the TransactionResult using the enrollId supplied'''
+    assert 'users' in context, "users not found in context. Did you register a user?"
+    assert 'compose_containers' in context, "compose_containers not found in context"
+
+    # Talk to the peer that this user originally registered with.
+    (channel, userRegistration) = getGRPCChannelAndUser(context, enrollId)
+    stub = devops_pb2.beta_create_Devops_stub(channel)
+
+    # context.transactionID is set by a prior deploy/invoke step.
+    txRequest = devops_pb2.TransactionRequest(transactionUuid = context.transactionID)
+    response = stub.GetTransactionResult(txRequest, 2)
+    assert response.status == fabric_pb2.Response.SUCCESS, 'Failure getting Transaction Result from {0}, for user "{1}": {2}'.format(userRegistration.composeService,enrollId, response.msg)
+    # Now grab the TransactionResult from the Msg bytes
+    txResult = fabric_pb2.TransactionResult()
+    txResult.ParseFromString(response.msg)
+    return txResult
+
+def getGRPCChannel(ipAddress):
+ channel = implementations.insecure_channel(ipAddress, 30303)
+ print("Returning GRPC for address: {0}".format(ipAddress))
+ return channel
+
+def getGRPCChannelAndUser(context, enrollId):
+    '''Returns a tuple of GRPC channel and UserRegistration instance. The channel is open to the composeService that the user registered with.'''
+    # Raises if the enrollId was never registered.
+    userRegistration = bdd_test_util.getUserRegistration(context, enrollId)
+
+    # Get the IP address of the server that the user registered on
+    ipAddress = bdd_test_util.ipFromContainerNamePart(userRegistration.composeService, context.compose_containers)
+
+    channel = getGRPCChannel(ipAddress)
+
+    return (channel, userRegistration)
+
+
+def getDeployment(context, ccAlias):
+ '''Return a deployment with chaincode alias from prior deployment, or None if not found'''
+ deployment = None
+ if 'deployments' in context:
+ pass
+ else:
+ context.deployments = {}
+ if ccAlias in context.deployments:
+ deployment = context.deployments[ccAlias]
+ # else:
+ # raise Exception("Deployment alias not found: '{0}'. Are you sure you have deployed a chaincode with this alias?".format(ccAlias))
+ return deployment
+
+def deployChaincode(context, enrollId, chaincodePath, ccAlias, ctor):
+    '''Deploy a chaincode with the specified alias for the specfied enrollId'''
+    (channel, userRegistration) = getGRPCChannelAndUser(context, enrollId)
+    stub = devops_pb2.beta_create_Devops_stub(channel)
+
+    # Make sure deployment alias does NOT already exist
+    assert getDeployment(context, ccAlias) == None, "Deployment alias already exists: '{0}'.".format(ccAlias)
+
+    args = getArgsFromContextForUser(context, enrollId)
+    # Name is left empty here; the peer assigns it and we copy it back below.
+    ccSpec = chaincode_pb2.ChaincodeSpec(type = chaincode_pb2.ChaincodeSpec.GOLANG,
+        chaincodeID = chaincode_pb2.ChaincodeID(name="",path=chaincodePath),
+        ctorMsg = chaincode_pb2.ChaincodeInput(function = ctor, args = args))
+    ccSpec.secureContext = userRegistration.getUserName()
+    if 'metadata' in context:
+        ccSpec.metadata = context.metadata
+    try:
+        # 60s timeout: deploy builds a docker image, which can be slow.
+        ccDeploymentSpec = stub.Deploy(ccSpec, 60)
+        ccSpec.chaincodeID.name = ccDeploymentSpec.chaincodeSpec.chaincodeID.name
+        context.grpcChaincodeSpec = ccSpec
+        context.deployments[ccAlias] = ccSpec
+    except:
+        del stub
+        raise
+
+def invokeChaincode(context, enrollId, ccAlias, functionName):
+    '''Invoke functionName on the previously deployed chaincode known as ccAlias, as enrollId.'''
+    # Get the deployment for the supplied chaincode alias
+    deployedCcSpec = getDeployment(context, ccAlias)
+    assert deployedCcSpec != None, "Deployment NOT found for chaincode alias '{0}'".format(ccAlias)
+
+    # Create a new ChaincodeSpec by copying the deployed one
+    newChaincodeSpec = chaincode_pb2.ChaincodeSpec()
+    newChaincodeSpec.CopyFrom(deployedCcSpec)
+
+    # Update the chaincodeSpec ctorMsg for invoke
+    args = getArgsFromContextForUser(context, enrollId)
+
+    chaincodeInput = chaincode_pb2.ChaincodeInput(function = functionName, args = args )
+    newChaincodeSpec.ctorMsg.CopyFrom(chaincodeInput)
+
+    ccInvocationSpec = chaincode_pb2.ChaincodeInvocationSpec(chaincodeSpec = newChaincodeSpec)
+
+    (channel, userRegistration) = getGRPCChannelAndUser(context, enrollId)
+
+    stub = devops_pb2.beta_create_Devops_stub(channel)
+    # 2s RPC timeout; the returned Response carries the transaction id in msg.
+    response = stub.Invoke(ccInvocationSpec,2)
+    return response
+
+def getArgsFromContextForUser(context, enrollId):
+    '''Collect chaincode args from the step table, substituting {tagName} expressions with the user's stored tag values.'''
+    # Update the chaincodeSpec ctorMsg for invoke
+    args = []
+    if 'table' in context:
+        # There are function arguments
+        userRegistration = bdd_test_util.getUserRegistration(context, enrollId)
+        # Allow the user to specify expressions referencing tags in the args list
+        pattern = re.compile('\{(.*)\}$')
+        for arg in context.table[0].cells:
+            m = pattern.match(arg)
+            if m:
+                # tagName reference found in args list
+                tagName = m.groups()[0]
+                # make sure the tagName is found in the users tags
+                assert tagName in userRegistration.tags, "TagName '{0}' not found for user '{1}'".format(tagName, userRegistration.getUserName())
+                args.append(userRegistration.tags[tagName])
+            else:
+                # No tag referenced, pass the arg through unchanged
+                args.append(arg)
+    return args
diff --git a/bddtests/steps/bdd_test_util.py b/bddtests/steps/bdd_test_util.py
new file mode 100644
index 00000000000..77dfae23c66
--- /dev/null
+++ b/bddtests/steps/bdd_test_util.py
@@ -0,0 +1,123 @@
+
+# Copyright IBM Corp. 2016 All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import re
+import subprocess
+
+def cli_call(context, arg_list, expect_success=True):
+    """Executes a CLI command in a subprocess and return the results.
+
+    @param context: the behave context (unused here; kept for call-site symmetry)
+    @param arg_list: a list command arguments
+    @param expect_success: use False to return even if an error occurred when executing the command
+    @return: (string, string, int) output message, error message, return code
+    """
+    #arg_list[0] = "update-" + arg_list[0]
+
+    # We need to run the cli command by actually calling the python command
+    # the update-cli.py script has a #!/bin/python as the first line
+    # which calls the system python, not the virtual env python we
+    # setup for running the update-cli
+    p = subprocess.Popen(arg_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    # communicate() waits for the process and drains both pipes, avoiding deadlock.
+    output, error = p.communicate()
+    if p.returncode != 0:
+        if output is not None:
+            print("Output:\n" + output)
+        if error is not None:
+            print("Error Message:\n" + error)
+        if expect_success:
+            raise subprocess.CalledProcessError(p.returncode, arg_list, output)
+    return output, error, p.returncode
+
+class UserRegistration:
+    """Holds a registered user's enrollment secret, home compose service, and per-step scratch data."""
+    def __init__(self, secretMsg, composeService):
+        self.secretMsg = secretMsg            # dict with 'enrollId' and 'enrollSecret'
+        self.composeService = composeService  # compose service name the user registered with
+        self.tags = {}                        # named values stored by BDD steps
+        self.lastResult = None                # most recent step result for this user
+
+    def getUserName(self):
+        # The enrollment id doubles as the user name.
+        return self.secretMsg['enrollId']
+
+# Registerses a user on a specific composeService
+def registerUser(context, secretMsg, composeService):
+ userName = secretMsg['enrollId']
+ if 'users' in context:
+ pass
+ else:
+ context.users = {}
+ if userName in context.users:
+ raise Exception("User already registered: {0}".format(userName))
+ context.users[userName] = UserRegistration(secretMsg, composeService)
+
+# Registerses a user on a specific composeService
+def getUserRegistration(context, enrollId):
+ userRegistration = None
+ if 'users' in context:
+ pass
+ else:
+ context.users = {}
+ if enrollId in context.users:
+ userRegistration = context.users[enrollId]
+ else:
+ raise Exception("User has not been registered: {0}".format(enrollId))
+ return userRegistration
+
+
+def ipFromContainerNamePart(namePart, containerDataList):
+ """Returns the IPAddress based upon a name part of the full container name"""
+ containerData = containerDataFromNamePart(namePart, containerDataList)
+
+ if containerData == None:
+ raise Exception("Could not find container with namePart = {0}".format(namePart))
+
+ return containerData.ipAddress
+
+def fullNameFromContainerNamePart(namePart, containerDataList):
+ containerData = containerDataFromNamePart(namePart, containerDataList)
+
+ if containerData == None:
+ raise Exception("Could not find container with namePart = {0}".format(namePart))
+
+ return containerData.containerName
+
+def containerDataFromNamePart(namePart, containerDataList):
+ containerNamePrefix = os.path.basename(os.getcwd()) + "_"
+ fullContainerName = containerNamePrefix + namePart
+
+ for containerData in containerDataList:
+ if containerData.containerName.startswith(fullContainerName):
+ return containerData
+
+ return None
+
+def getContainerDataValuesFromContext(context, aliases, callback):
+    """Applies callback to the ContainerData of each alias in order and returns the collected results."""
+    assert 'compose_containers' in context, "compose_containers not found in context"
+    values = []
+    containerNamePrefix = os.path.basename(os.getcwd()) + "_"
+    for namePart in aliases:
+        for containerData in context.compose_containers:
+            if containerData.containerName.startswith(containerNamePrefix + namePart):
+                values.append(callback(containerData))
+                # Only the first matching container per alias is used.
+                break
+    return values
+
+
+def start_background_process(context, program_name, arg_list):
+    # Launch arg_list as a background process and stash the Popen handle on the
+    # behave context under program_name so later steps can reach it.
+    # NOTE(review): stdout/stderr are PIPEs that are never drained here; a very
+    # chatty child could block on a full pipe buffer — confirm callers either
+    # read or don't care about these streams.
+    p = subprocess.Popen(arg_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    setattr(context, program_name, p)
diff --git a/bddtests/steps/chaincode_rbac_impl.py b/bddtests/steps/chaincode_rbac_impl.py
new file mode 100644
index 00000000000..4c70978a568
--- /dev/null
+++ b/bddtests/steps/chaincode_rbac_impl.py
@@ -0,0 +1,138 @@
+import os
+import re
+import time
+import copy
+import base64
+from datetime import datetime, timedelta
+
+import sys, requests, json
+
+import bdd_test_util
+
+import bdd_grpc_util
+from grpc.beta import implementations
+
+import fabric_pb2
+import chaincode_pb2
+import devops_pb2
+
+LAST_REQUESTED_TCERT="lastRequestedTCert"
+
+
+@when(u'user "{enrollId}" requests a new application TCert')
+def step_impl(context, enrollId):
+ assert 'users' in context, "users not found in context. Did you register a user?"
+ (channel, userRegistration) = bdd_grpc_util.getGRPCChannelAndUser(context, enrollId)
+
+ stub = devops_pb2.beta_create_Devops_stub(channel)
+
+ secret = bdd_grpc_util.getSecretForUserRegistration(userRegistration)
+ response = stub.EXP_GetApplicationTCert(secret,2)
+ assert response.status == fabric_pb2.Response.SUCCESS, 'Failure getting TCert from {0}, for user "{1}": {2}'.format(userRegistration.composeService,enrollId, response.msg)
+ tcert = response.msg
+
+ userRegistration.lastResult = tcert
+
+@when(u'user "{enrollId}" stores their last result as "{tagName}"')
+def step_impl(context, enrollId, tagName):
+ assert 'users' in context, "users not found in context. Did you register a user?"
+ # Retrieve the userRegistration from the context
+ userRegistration = bdd_test_util.getUserRegistration(context, enrollId)
+ userRegistration.tags[tagName] = userRegistration.lastResult
+
+@when(u'user "{enrollId}" sets metadata to their stored value "{tagName}"')
+def step_impl(context, enrollId, tagName):
+ assert 'users' in context, "users not found in context. Did you register a user?"
+ # Retrieve the userRegistration from the context
+ userRegistration = bdd_test_util.getUserRegistration(context, enrollId)
+ assert tagName in userRegistration.tags, 'Tag "{0}" not found in user "{1}" tags'.format(tagName, enrollId)
+ context.metadata = userRegistration.tags[tagName]
+
+
+@when(u'user "{enrollId}" deploys chaincode "{chaincodePath}" aliased as "{ccAlias}" with ctor "{ctor}" and args')
+def step_impl(context, enrollId, chaincodePath, ccAlias, ctor):
+ bdd_grpc_util.deployChaincode(context, enrollId, chaincodePath, ccAlias, ctor)
+
+
+@when(u'user "{enrollId}" gives stored value "{tagName}" to "{recipientEnrollId}"')
+def step_impl(context, enrollId, tagName, recipientEnrollId):
+ assert 'users' in context, "users not found in context. Did you register a user?"
+ # Retrieve the userRegistration from the context
+ userRegistration = bdd_test_util.getUserRegistration(context, enrollId)
+ recipientUserRegistration = bdd_test_util.getUserRegistration(context, recipientEnrollId)
+ # Copy value from target to recipient
+ recipientUserRegistration.tags[tagName] = userRegistration.tags[tagName]
+
+
+@when(u'"{enrollId}" uses application TCert "{assignerAppTCert}" to assign role "{role}" to application TCert "{assigneeAppTCert}"')
+def step_impl(context, enrollId, assignerAppTCert, role, assigneeAppTCert):
+ assert 'users' in context, "users not found in context. Did you register a user?"
+ assert 'compose_containers' in context, "compose_containers not found in context"
+
+ (channel, userRegistration) = bdd_grpc_util.getGRPCChannelAndUser(context, enrollId)
+
+ stub = devops_pb2.beta_create_Devops_stub(channel)
+
+ # First get binding with EXP_PrepareForTx
+ secret = bdd_grpc_util.getSecretForUserRegistration(userRegistration)
+ response = stub.EXP_PrepareForTx(secret,2)
+ assert response.status == fabric_pb2.Response.SUCCESS, 'Failure getting Binding from {0}, for user "{1}": {2}'.format(userRegistration.composeService,enrollId, response.msg)
+ binding = response.msg
+
+ # Now produce the sigma EXP_ProduceSigma
+ chaincodeInput = chaincode_pb2.ChaincodeInput(function = "addRole", args = (base64.b64encode(userRegistration.tags[assigneeAppTCert]), role) )
+ chaincodeInputRaw = chaincodeInput.SerializeToString()
+ appTCert = userRegistration.tags[assignerAppTCert]
+ sigmaInput = devops_pb2.SigmaInput(secret = secret, appTCert = appTCert, data = chaincodeInputRaw + binding)
+ response = stub.EXP_ProduceSigma(sigmaInput,2)
+ assert response.status == fabric_pb2.Response.SUCCESS, 'Failure prducing sigma from {0}, for user "{1}": {2}'.format(userRegistration.composeService,enrollId, response.msg)
+ sigmaOutputBytes = response.msg
+ # Parse the msg bytes as a SigmaOutput message
+ sigmaOutput = devops_pb2.SigmaOutput()
+ sigmaOutput.ParseFromString(sigmaOutputBytes)
+ print('Length of sigma = {0}'.format(len(sigmaOutput.sigma)))
+
+ # Now execute the transaction with the saved binding, EXP_ExecuteWithBinding
+ assert "grpcChaincodeSpec" in context, "grpcChaincodeSpec NOT found in context"
+ newChaincodeSpec = chaincode_pb2.ChaincodeSpec()
+ newChaincodeSpec.CopyFrom(context.grpcChaincodeSpec)
+ newChaincodeSpec.metadata = sigmaOutput.asn1Encoding
+ newChaincodeSpec.ctorMsg.CopyFrom(chaincodeInput)
+
+ ccInvocationSpec = chaincode_pb2.ChaincodeInvocationSpec(chaincodeSpec = newChaincodeSpec)
+
+ executeWithBinding = devops_pb2.ExecuteWithBinding(chaincodeInvocationSpec = ccInvocationSpec, binding = binding)
+
+ response = stub.EXP_ExecuteWithBinding(executeWithBinding,60)
+ assert response.status == fabric_pb2.Response.SUCCESS, 'Failure executeWithBinding from {0}, for user "{1}": {2}'.format(userRegistration.composeService,enrollId, response.msg)
+ context.response = response
+ context.transactionID = response.msg
+
+
+@then(u'"{enrollId}"\'s last transaction should have failed with message that contains "{msg}"')
+def step_impl(context, enrollId, msg):
+ assert 'users' in context, "users not found in context. Did you register a user?"
+ assert 'compose_containers' in context, "compose_containers not found in context"
+ txResult = bdd_grpc_util.getTxResult(context, enrollId)
+ assert txResult.errorCode > 0, "Expected failure (errorCode > 0), instead found errorCode={0}".format(txResult.errorCode)
+ assert msg in txResult.error, "Expected error to contain'{0}', instead found '{1}".format(msg, txResult.error)
+
+@then(u'"{enrollId}"\'s last transaction should have succeeded')
+def step_impl(context, enrollId):
+ txResult = bdd_grpc_util.getTxResult(context, enrollId)
+ assert txResult.errorCode == 0, "Expected success (errorCode == 0), instead found errorCode={0}, error={1}".format(txResult.errorCode, txResult.error)
+
+@when(u'user "{enrollId}" invokes chaincode "{ccAlias}" function name "{functionName}" with args')
+def step_impl(context, enrollId, ccAlias, functionName):
+ response = bdd_grpc_util.invokeChaincode(context, enrollId, ccAlias, functionName)
+ context.response = response
+ context.transactionID = response.msg
+ #assert response.status == fabric_pb2.Response.SUCCESS, 'Failure invoking chaincode {0} on {1}, for user "{2}": {3}'.format(ccAlias, userRegistration.composeService,enrollId, response.msg)
+
+@given(u'user "{enrollId}" stores a reference to chaincode "{ccAlias}" as "{tagName}"')
+def step_impl(context, enrollId, ccAlias, tagName):
+ # Retrieve the userRegistration from the context
+ userRegistration = bdd_test_util.getUserRegistration(context, enrollId)
+ deployedCcSpec = bdd_grpc_util.getDeployment(context, ccAlias)
+ assert deployedCcSpec != None, "Deployment NOT found for chaincode alias '{0}'".format(ccAlias)
+ userRegistration.tags[tagName] = deployedCcSpec.chaincodeID.name
diff --git a/bddtests/steps/coverage.py b/bddtests/steps/coverage.py
new file mode 100644
index 00000000000..dc04579650b
--- /dev/null
+++ b/bddtests/steps/coverage.py
@@ -0,0 +1,94 @@
+import cStringIO
+import os
+import glob
+import errno
+from collections import OrderedDict
+
+import bdd_test_util
+
+
+def testCoverage():
+ #First save the coverage files
+ saveCoverageFiles("coverage","scenario_1", ["bddtests_vp0_1","bddtests_vp1_1","bddtests_vp2_1","bddtests_vp3_1",], "cov")
+
+ # Now collect the filenames for coverage files.
+ files = glob.glob(os.path.join('coverage','*.cov'))
+
+ #Create the aggregate coverage file
+ coverageContents = createCoverageFile(files)
+
+ #Ouput the aggregate coverage file
+ with open('coverage.total', 'w') as outfile:
+ outfile.write(coverageContents)
+ outfile.close()
+
+def createCoverageAggregate():
+ # Now collect the filenames for coverage files.
+ files = glob.glob(os.path.join('coverage','*.cov'))
+
+ #Create the aggregate coverage file
+ coverageContents = createCoverageFile(files)
+
+ #Ouput the aggregate coverage file
+ with open('coverage-behave.cov', 'w') as outfile:
+ outfile.write(coverageContents)
+ outfile.close()
+
+
+def saveCoverageFiles(folderName, rootName, containerNames, extension):
+    'Copies each container\'s coverage.cov out of Docker into folderName, named <rootName>-<container>.<extension>'
+    # Make the directory
+    try:
+        os.makedirs(folderName)
+    except OSError as exception:
+        # Ignore "already exists"; re-raise anything else.
+        if exception.errno != errno.EEXIST:
+            raise
+    for containerName in containerNames:
+        srcPath = "{0}:/opt/gopath/src/github.com/hyperledger/fabric/coverage.cov".format(containerName)
+        print("sourcepath = {0}".format(srcPath))
+        destPath = os.path.join(folderName, "{0}-{1}.{2}".format(rootName, containerName, extension))
+        # expect_success=False: a container without a coverage file is tolerated.
+        output, error, returncode = \
+            bdd_test_util.cli_call(None, ["docker"] + ["cp", srcPath, destPath], expect_success=False)
+
+def testCreateSystemCoverageFile(folderName, rootName, containerNames, extension):
+ 'Will create a single aggregate coverage file fromsave the converage files to folderName'
+ files = glob.glob(os.path.join('coverage','*.cov'))
+ for containerName in containerNames:
+ srcPath = "{0}:/opt/gopath/src/github.com/hyperledger/fabric/peer/coverage.cov".format(containerName)
+ destPath = os.path.join(folderName, "{0}-{1}.{2}".format(rootName, containerName, extension))
+ output, error, returncode = \
+ bdd_test_util.cli_call(None, ["docker"] + ["cp", srcPath, destPath], expect_success=False)
+
+
+def createCoverageFile(filenames):
+    """Creates an aggregated coverage file"""
+    # NOTE: Python 2 code (cStringIO, long); merge strategy is "keep the
+    # highest hit count seen for each code span" across all input files.
+    output = cStringIO.StringIO()
+    output.write('mode: count\n')
+    linesMap = {}
+    #with open('coverage.total', 'w') as outfile:
+    for fname in filenames:
+        with open(fname) as infile:
+            firstLine = True
+            for line in infile:
+                if firstLine:
+                    # Skip the per-file "mode:" header.
+                    firstLine = False
+                    continue
+                else:
+                    # Split the line based upon white space
+                    lineParts = line.split()
+                    if lineParts[0] in linesMap:
+                        # Found, keep the greater
+                        newCount = long(lineParts[2])
+                        oldCount = long(linesMap[lineParts[0]].split()[2])
+                        if newCount > oldCount:
+                            linesMap[lineParts[0]] = line
+                    else:
+                        linesMap[lineParts[0]] = line
+    # Now sort the output
+    # NOTE(review): this sorts by the full line text (value), not by the span
+    # key — presumably only a stable ordering is needed; confirm consumers.
+    od = OrderedDict(sorted(linesMap.items(), key=lambda i: i[1]))
+
+    for (key, line) in od.items():
+        output.write(line)
+    contents = output.getvalue()
+    output.close()
+    return contents
diff --git a/bddtests/steps/peer_basic_impl.py b/bddtests/steps/peer_basic_impl.py
new file mode 100644
index 00000000000..c4b285b80d6
--- /dev/null
+++ b/bddtests/steps/peer_basic_impl.py
@@ -0,0 +1,772 @@
+#
+# Copyright IBM Corp. 2016 All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import os.path
+import re
+import time
+import copy
+from datetime import datetime, timedelta
+
+import sys, requests, json
+
+import bdd_test_util
+
+CORE_REST_PORT = 5000
+JSONRPC_VERSION = "2.0"
+
+class ContainerData:
+    """Metadata for one running docker-compose container."""
+    def __init__(self, containerName, ipAddress, envFromInspect, composeService):
+        self.containerName = containerName      # full docker container name
+        self.ipAddress = ipAddress              # from docker inspect NetworkSettings
+        self.envFromInspect = envFromInspect    # list of 'KEY=value' strings
+        self.composeService = composeService    # com.docker.compose.service label value
+
+    def getEnv(self, key):
+        """Return the text following the first env entry starting with `key`.
+
+        NOTE(review): the prefix is stripped verbatim, so callers presumably
+        pass the trailing '=' as part of key (e.g. 'CORE_PEER_ID=') — confirm.
+        """
+        envValue = None
+        for val in self.envFromInspect:
+            if val.startswith(key):
+                envValue = val[len(key):]
+                break
+        if envValue == None:
+            raise Exception("ENV key not found ({0}) for container ({1})".format(key, self.containerName))
+        return envValue
+
+def parseComposeOutput(context):
+    """Parses the compose output results and set appropriate values into context. Merges existing with newly composed."""
+    # Use the prefix to get the container name
+    containerNamePrefix = os.path.basename(os.getcwd()) + "_"
+    containerNames = []
+    # docker-compose reports started containers on stderr; scrape names from it.
+    for l in context.compose_error.splitlines():
+        tokens = l.split()
+        print(tokens)
+        if 1 < len(tokens):
+            thisContainer = tokens[1]
+            if containerNamePrefix not in thisContainer:
+                # Bare service name; expand to the full compose container name.
+                thisContainer = containerNamePrefix + thisContainer + "_1"
+            if thisContainer not in containerNames:
+                containerNames.append(thisContainer)
+
+    print("Containers started: ")
+    print(containerNames)
+    # Now get the Network Address for each name, and set the ContainerData onto the context.
+    containerDataList = []
+    for containerName in containerNames:
+        output, error, returncode = \
+            bdd_test_util.cli_call(context, ["docker", "inspect", "--format",  "{{ .NetworkSettings.IPAddress }}", containerName], expect_success=True)
+        print("container {0} has address = {1}".format(containerName, output.splitlines()[0]))
+        ipAddress = output.splitlines()[0]
+
+        # Get the environment array
+        output, error, returncode = \
+            bdd_test_util.cli_call(context, ["docker", "inspect", "--format",  "{{ .Config.Env }}", containerName], expect_success=True)
+        # Strip the enclosing brackets from docker's "[...]" list rendering.
+        env = output.splitlines()[0][1:-1].split()
+
+        # Get the Labels to access the com.docker.compose.service value
+        output, error, returncode = \
+            bdd_test_util.cli_call(context, ["docker", "inspect", "--format",  "{{ .Config.Labels }}", containerName], expect_success=True)
+        labels = output.splitlines()[0][4:-1].split()
+        # 27 == len("com.docker.compose.service:"); keep only the label value.
+        dockerComposeService = [composeService[27:] for composeService in labels if composeService.startswith("com.docker.compose.service:")][0]
+        print("dockerComposeService = {0}".format(dockerComposeService))
+        print("container {0} has env = {1}".format(containerName, env))
+        containerDataList.append(ContainerData(containerName, ipAddress, env, dockerComposeService))
+    # Now merge the new containerData info with existing
+    newContainerDataList = []
+    if "compose_containers" in context:
+        # Need to merge I new list
+        newContainerDataList = context.compose_containers
+    newContainerDataList = newContainerDataList + containerDataList
+
+    setattr(context, "compose_containers", newContainerDataList)
+    print("")
+
+def buildUrl(context, ipAddress, path):
+ schema = "http"
+ if 'TLS' in context.tags:
+ schema = "https"
+ return "{0}://{1}:{2}{3}".format(schema, ipAddress, CORE_REST_PORT, path)
+
+def currentTime():
+    # Wall-clock time as HH:MM:SS, used for log labelling.
+    return time.strftime("%H:%M:%S")
+
+def getDockerComposeFileArgsFromYamlFile(compose_yaml):
+    # Turn a whitespace-separated list like "a.yml b.yml" into
+    # ["-f", "a.yml", "-f", "b.yml"] for docker-compose.
+    parts = compose_yaml.split()
+    args = []
+    for part in parts:
+        args = args + ["-f"] + [part]
+    return args
+
+@given(u'we compose "{composeYamlFile}"')
+def step_impl(context, composeYamlFile):
+    '''Bring up the docker-compose environment described by one or more yaml files.'''
+    context.compose_yaml = composeYamlFile
+    fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
+    context.compose_output, context.compose_error, context.compose_returncode = \
+        bdd_test_util.cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + ["up","--force-recreate", "-d"], expect_success=True)
+    assert context.compose_returncode == 0, "docker-compose failed to bring up {0}".format(composeYamlFile)
+    parseComposeOutput(context)
+    time.sleep(10) # Should be replaced with a definitive interlock guaranteeing that all peers/membersrvc are ready
+
+@when(u'requesting "{path}" from "{containerName}"')
+def step_impl(context, path, containerName):
+ ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
+ request_url = buildUrl(context, ipAddress, path)
+ print("Requesting path = {0}".format(request_url))
+ resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
+ assert resp.status_code == 200, "Failed to GET url %s: %s" % (request_url,resp.text)
+ context.response = resp
+ print("")
+
+@then(u'I should get a JSON response containing "{attribute}" attribute')
+def step_impl(context, attribute):
+    # Dotted attribute paths (e.g. "result.message") are supported.
+    getAttributeFromJSON(attribute, context.response.json(), "Attribute not found in response (%s)" %(attribute))
+
+@then(u'I should get a JSON response containing no "{attribute}" attribute')
+def step_impl(context, attribute):
+ try:
+ getAttributeFromJSON(attribute, context.response.json(), "")
+ assert None, "Attribute found in response (%s)" %(attribute)
+ except AssertionError:
+ print("Attribute not found as was expected.")
+
+def getAttributeFromJSON(attribute, jsonObject, msg):
+    # Resolve a dotted path like "result.message"; asserts with msg if a
+    # segment is missing, otherwise returns the resolved value.
+    return getHierarchyAttributesFromJSON(attribute.split("."), jsonObject, msg)
+
+def getHierarchyAttributesFromJSON(attributes, jsonObject, msg):
+    # Recursively descend jsonObject along the attribute-name list; asserts
+    # with msg when a key is absent. Empty list returns jsonObject itself.
+    if len(attributes) > 0:
+        assert attributes[0] in jsonObject, msg
+        return getHierarchyAttributesFromJSON(attributes[1:], jsonObject[attributes[0]], msg)
+    return jsonObject
+
+def formatStringToCompare(value):
+    # double quotes are replaced by simple quotes because is not possible escape double quotes in the attribute parameters.
+    return str(value).replace("\"", "'")
+
+@then(u'I should get a JSON response with "{attribute}" = "{expectedValue}"')
+def step_impl(context, attribute, expectedValue):
+    # Compare as strings after quote normalization (see formatStringToCompare).
+    foundValue = getAttributeFromJSON(attribute, context.response.json(), "Attribute not found in response (%s)" %(attribute))
+    assert (formatStringToCompare(foundValue) == expectedValue), "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue)
+
+@then(u'I should get a JSON response with array "{attribute}" contains "{expectedValue}" elements')
+def step_impl(context, attribute, expectedValue):
+    # expectedValue arrives as a string from the feature file; compare as int.
+    foundValue = getAttributeFromJSON(attribute, context.response.json(), "Attribute not found in response (%s)" %(attribute))
+    assert (len(foundValue) == int(expectedValue)), "For attribute %s, expected array of size (%s), instead found (%s)" % (attribute, expectedValue, len(foundValue))
+
+@given(u'I wait "{seconds}" seconds')
+def step_impl(context, seconds):
+ time.sleep(float(seconds))
+
+@when(u'I wait "{seconds}" seconds')
+def step_impl(context, seconds):
+ time.sleep(float(seconds))
+
+@then(u'I wait "{seconds}" seconds')
+def step_impl(context, seconds):
+ time.sleep(float(seconds))
+
+@when(u'I deploy lang chaincode "{chaincodePath}" of "{chainLang}" with ctor "{ctor}" to "{containerName}"')
+def step_impl(context, chaincodePath, chainLang, ctor, containerName):
+    '''Deploy a chaincode in an explicit language via the REST endpoint of containerName.'''
+    print("Printing chaincode language " + chainLang)
+
+    chaincode = {
+        "path": chaincodePath,
+        "language": chainLang,
+        "constructor": ctor,
+        "args": getArgsFromContext(context),
+    }
+
+    deployChainCodeToContainer(context, chaincode, containerName)
+
+def getArgsFromContext(context):
+    # Returns the first row of the step's table as the ctor args, or [] when
+    # the step has no table attached.
+    args = []
+    if 'table' in context:
+        # There is ctor arguments
+        args = context.table[0].cells
+
+    return args
+
+@when(u'I deploy chaincode "{chaincodePath}" with ctor "{ctor}" to "{containerName}"')
+def step_impl(context, chaincodePath, ctor, containerName):
+    '''Deploy a GOLANG chaincode by path via the REST endpoint of containerName.'''
+    chaincode = {
+        "path": chaincodePath,
+        "language": "GOLANG",
+        "constructor": ctor,
+        "args": getArgsFromContext(context),
+    }
+
+    deployChainCodeToContainer(context, chaincode, containerName)
+
+@when(u'I deploy chaincode with name "{chaincodeName}" and with ctor "{ctor}" to "{containerName}"')
+def step_impl(context, chaincodeName, ctor, containerName):
+    '''Deploy a pre-named GOLANG chaincode via the REST endpoint of containerName.'''
+    chaincode = {
+        "name": chaincodeName,
+        "language": "GOLANG",
+        "constructor": ctor,
+        "args": getArgsFromContext(context),
+    }
+
+    deployChainCodeToContainer(context, chaincode, containerName)
+    time.sleep(2.0) # After #2068 implemented change this to only apply after a successful ping
+
+def deployChainCodeToContainer(context, chaincode, containerName):
+    '''POST a deploy request to the container's /chaincode REST endpoint and record the spec on the context.'''
+    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
+    request_url = buildUrl(context, ipAddress, "/chaincode")
+    print("Requesting path = {0}".format(request_url))
+
+    chaincodeSpec = createChaincodeSpec(context, chaincode)
+    chaincodeOpPayload = createChaincodeOpPayload("deploy", chaincodeSpec)
+
+    resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeOpPayload), verify=False)
+    assert resp.status_code == 200, "Failed to POST to %s:  %s" %(request_url, resp.text)
+    context.response = resp
+    # The peer returns the assigned chaincode name in result.message.
+    chaincodeName = resp.json()['result']['message']
+    chaincodeSpec['chaincodeID']['name'] = chaincodeName
+    context.chaincodeSpec = chaincodeSpec
+    print(json.dumps(chaincodeSpec, indent=4))
+    print("")
+
+def createChaincodeSpec(context, chaincode):
+    '''Build the REST chaincodeSpec dict from a chaincode description, folding in secureContext/metadata from the context when present.'''
+    # Missing fields are defaulted to "" first.
+    chaincode = validateChaincodeDictionary(chaincode)
+
+    chaincodeSpec = {
+        "type": getChaincodeTypeValue(chaincode["language"]),
+        "chaincodeID": {
+            "path" : chaincode["path"],
+            "name" : chaincode["name"]
+        },
+        "ctorMsg":  {
+            "function" : chaincode["constructor"],
+            "args" : chaincode["args"]
+        },
+    }
+
+    if 'userName' in context:
+        chaincodeSpec["secureContext"] = context.userName
+    if 'metadata' in context:
+        chaincodeSpec["metadata"] = context.metadata
+
+    return chaincodeSpec
+
+def validateChaincodeDictionary(chaincode):
+    '''Ensure all expected chaincode keys exist, defaulting missing ones to "".'''
+    chaincodeFields = ["path", "name", "language", "constructor", "args"]
+
+    for field in chaincodeFields:
+        if field not in chaincode:
+            chaincode[field] = ""
+
+    return chaincode
+
+def getChaincodeTypeValue(chainLang):
+ if chainLang == "GOLANG":
+ return 1
+ elif chainLang =="JAVA":
+ return 4
+ elif chainLang == "NODE":
+ return 2
+ elif chainLang == "CAR":
+ return 3
+ elif chainLang == "UNDEFINED":
+ return 0
+ return 1
+
+@when(u'I mock deploy chaincode with name "{chaincodeName}"')
+def step_impl(context, chaincodeName):
+    # Build a chaincodeSpec locally without POSTing a deploy to any peer.
+    chaincode = {
+        "name": chaincodeName,
+        "language": "GOLANG"
+    }
+
+    context.chaincodeSpec = createChaincodeSpec(context, chaincode)
+
+@then(u'I should have received a chaincode name')
+def step_impl(context):
+ if 'chaincodeSpec' in context:
+ assert context.chaincodeSpec['chaincodeID']['name'] != ""
+ # Set the current transactionID to the name passed back
+ context.transactionID = context.chaincodeSpec['chaincodeID']['name']
+ elif 'grpcChaincodeSpec' in context:
+ assert context.grpcChaincodeSpec.chaincodeID.name != ""
+ # Set the current transactionID to the name passed back
+ context.transactionID = context.grpcChaincodeSpec.chaincodeID.name
+ else:
+ fail('chaincodeSpec not in context')
+
+@when(u'I invoke chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}" with "{idGenAlg}"')
+def step_impl(context, chaincodeName, functionName, containerName, idGenAlg):
+    # Invoke via the deprecated devops API so the id-generation algorithm can
+    # be passed through (the /chaincode API has no such parameter).
+    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
+    invokeChaincode(context, "invoke", functionName, containerName, idGenAlg)
+
+@when(u'I invoke chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}" "{times}" times')
+def step_impl(context, chaincodeName, functionName, containerName, times):
+    # Repeat the same invoke; only the last response/transactionID is kept.
+    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
+    for i in range(int(times)):
+        invokeChaincode(context, "invoke", functionName, containerName)
+
+@when(u'I invoke chaincode "{chaincodeName}" function name "{functionName}" with attributes "{attrs}" on "{containerName}"')
+def step_impl(context, chaincodeName, functionName, attrs, containerName):
+    # attrs is a comma-separated list of attribute names to request.
+    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
+    assert attrs, "attrs were not specified"
+    invokeChaincode(context, "invoke", functionName, containerName, None, attrs.split(","))
+
+@when(u'I invoke chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}"')
+def step_impl(context, chaincodeName, functionName, containerName):
+    # Plain invoke of the previously deployed chaincode (context.chaincodeSpec).
+    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
+    invokeChaincode(context, "invoke", functionName, containerName)
+
+@when(u'I invoke master chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}"')
+def step_impl(context, chaincodeName, functionName, containerName):
+    # Invoke an already-registered (master/system) chaincode by name, without
+    # relying on context.chaincodeSpec.
+    invokeMasterChaincode(context, "invoke", chaincodeName, functionName, containerName)
+
+@then(u'I should have received a transactionID')
+def step_impl(context):
+    # The invoke steps store the returned result message as transactionID.
+    assert 'transactionID' in context, 'transactionID not found in context'
+    assert context.transactionID != ""
+    pass
+
+@when(u'I unconditionally query chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}"')
+def step_impl(context, chaincodeName, functionName, containerName):
+    # NOTE(review): identical to the conditional variant below — the REST
+    # helper asserts status 200 either way; confirm whether this was intended.
+    invokeChaincode(context, "query", functionName, containerName)
+
+@when(u'I query chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}"')
+def step_impl(context, chaincodeName, functionName, containerName):
+    # Run a "query" JSON-RPC call against the named peer.
+    invokeChaincode(context, "query", functionName, containerName)
+
+def createChaincodeOpPayload(method, chaincodeSpec):
+ chaincodeOpPayload = {
+ "jsonrpc": JSONRPC_VERSION,
+ "method" : method,
+ "params" : chaincodeSpec,
+ "id" : 1
+ }
+ return chaincodeOpPayload
+
+def invokeChaincode(context, devopsFunc, functionName, containerName, idGenAlg=None, attributes=[]):
+ assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
+ # Update the chaincodeSpec ctorMsg for invoke
+ args = []
+ if 'table' in context:
+ # There is ctor arguments
+ args = context.table[0].cells
+
+ for idx, attr in enumerate(attributes):
+ attributes[idx] = attr.strip()
+
+ context.chaincodeSpec['ctorMsg']['function'] = functionName
+ context.chaincodeSpec['ctorMsg']['args'] = args
+ context.chaincodeSpec['attributes'] = attributes
+
+ #If idGenAlg is passed then, we still using the deprecated devops API because this parameter can't be passed in the new API.
+ if idGenAlg != None:
+ invokeUsingDevopsService(context, devopsFunc, functionName, containerName, idGenAlg)
+ else:
+ invokeUsingChaincodeService(context, devopsFunc, functionName, containerName)
+
+def invokeUsingChaincodeService(context, devopsFunc, functionName, containerName):
+    # POST the current chaincodeSpec to the peer's /chaincode endpoint and,
+    # when the reply carries result.message, record it as the transactionID.
+    # Invoke the POST
+    chaincodeOpPayload = createChaincodeOpPayload(devopsFunc, context.chaincodeSpec)
+
+    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
+
+    request_url = buildUrl(context, ipAddress, "/chaincode")
+    print("{0} POSTing path = {1}".format(currentTime(), request_url))
+    print("Using attributes {0}".format(context.chaincodeSpec['attributes']))
+
+    resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeOpPayload), verify=False)
+    assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
+    context.response = resp
+    print("RESULT from {0} of chaincode from peer {1}".format(functionName, containerName))
+    print(json.dumps(context.response.json(), indent = 4))
+    if 'result' in resp.json():
+        result = resp.json()['result']
+        if 'message' in result:
+            transactionID = result['message']
+            context.transactionID = transactionID
+
+def invokeUsingDevopsService(context, devopsFunc, functionName, containerName, idGenAlg):
+    # Same as above but through the deprecated /devops/<func> endpoint, which
+    # accepts an optional idGenerationAlg in the invocation spec.
+    # Invoke the POST
+    chaincodeInvocationSpec = {
+        "chaincodeSpec" : context.chaincodeSpec
+    }
+    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
+    if idGenAlg is not None:
+        chaincodeInvocationSpec['idGenerationAlg'] = idGenAlg
+    request_url = buildUrl(context, ipAddress, "/devops/{0}".format(devopsFunc))
+    print("{0} POSTing path = {1}".format(currentTime(), request_url))
+
+    resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeInvocationSpec), verify=False)
+    assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
+    context.response = resp
+    print("RESULT from {0} of chaincode from peer {1}".format(functionName, containerName))
+    print(json.dumps(context.response.json(), indent = 4))
+    if 'message' in resp.json():
+        transactionID = context.response.json()['message']
+        context.transactionID = transactionID
+
+def invokeMasterChaincode(context, devopsFunc, chaincodeName, functionName, containerName):
+    # Build a minimal GOLANG spec for an already-registered chaincode (no
+    # deploy step needed) and invoke it through /chaincode.
+    args = []
+    if 'table' in context:
+        args = context.table[0].cells
+    typeGolang = 1
+    chaincodeSpec = {
+        "type": typeGolang,
+        "chaincodeID": {
+            "name" : chaincodeName
+        },
+        "ctorMsg": {
+            "function" : functionName,
+            "args" : args
+        }
+    }
+    if 'userName' in context:
+        chaincodeSpec["secureContext"] = context.userName
+
+    chaincodeOpPayload = createChaincodeOpPayload(devopsFunc, chaincodeSpec)
+
+    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
+    request_url = buildUrl(context, ipAddress, "/chaincode")
+    print("{0} POSTing path = {1}".format(currentTime(), request_url))
+
+    resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeOpPayload), verify=False)
+    assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
+    context.response = resp
+    print("RESULT from {0} of chaincode from peer {1}".format(functionName, containerName))
+    print(json.dumps(context.response.json(), indent = 4))
+    if 'result' in resp.json():
+        result = resp.json()['result']
+        if 'message' in result:
+            transactionID = result['message']
+            context.transactionID = transactionID
+
+@then(u'I wait "{seconds}" seconds for chaincode to build')
+def step_impl(context, seconds):
+    """ This step takes into account the chaincodeImagesUpToDate tag, in which case the wait is reduce to some default seconds"""
+    reducedWaitTime = 4
+    if 'chaincodeImagesUpToDate' in context.tags:
+        print("Assuming images are up to date, sleeping for {0} seconds instead of {1} in scenario {2}".format(reducedWaitTime, seconds, context.scenario.name))
+        time.sleep(float(reducedWaitTime))
+    else:
+        time.sleep(float(seconds))
+
+@then(u'I wait "{seconds}" seconds for transaction to be committed to block on "{containerName}"')
+def step_impl(context, seconds, containerName):
+ assert 'transactionID' in context, "transactionID not found in context"
+ ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
+ request_url = buildUrl(context, ipAddress, "/transactions/{0}".format(context.transactionID))
+ print("{0} GETing path = {1}".format(currentTime(), request_url))
+
+ resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
+ assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
+ context.response = resp
+
+def multiRequest(context, seconds, containerDataList, pathBuilderFunc):
+ """Perform a multi request against the system"""
+ # Build map of "containerName" : response
+ respMap = {container.containerName:None for container in containerDataList}
+ # Set the max time before stopping attempts
+ maxTime = datetime.now() + timedelta(seconds = int(seconds))
+ for container in containerDataList:
+ ipAddress = container.ipAddress
+ request_url = buildUrl(context, ipAddress, pathBuilderFunc(context, container))
+
+ # Loop unless failure or time exceeded
+ while (datetime.now() < maxTime):
+ print("{0} GETing path = {1}".format(currentTime(), request_url))
+ resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
+ respMap[container.containerName] = resp
+ else:
+ raise Exception("Max time exceeded waiting for multiRequest with current response map = {0}".format(respMap))
+
+@then(u'I wait up to "{seconds}" seconds for transaction to be committed to all peers')
+def step_impl(context, seconds):
+    # Poll GET /transactions/<id> on every known container until each answers
+    # 200 or the shared deadline passes; 404 means "not committed yet".
+    assert 'transactionID' in context, "transactionID not found in context"
+    assert 'compose_containers' in context, "compose_containers not found in context"
+
+    # Build map of "containerName" : resp.statusCode
+    respMap = {container.containerName:0 for container in context.compose_containers}
+
+    # Set the max time before stopping attempts
+    maxTime = datetime.now() + timedelta(seconds = int(seconds))
+    for container in context.compose_containers:
+        ipAddress = container.ipAddress
+        request_url = buildUrl(context, ipAddress, "/transactions/{0}".format(context.transactionID))
+
+        # Loop unless failure or time exceeded
+        while (datetime.now() < maxTime):
+            print("{0} GETing path = {1}".format(currentTime(), request_url))
+            resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
+            if resp.status_code == 404:
+                # Pause then try again
+                respMap[container.containerName] = 404
+                time.sleep(1)
+                continue
+            elif resp.status_code == 200:
+                # Success, continue
+                respMap[container.containerName] = 200
+                break
+            else:
+                raise Exception("Error requesting {0}, returned result code = {1}".format(request_url, resp.status_code))
+        else:
+            # while/else: deadline expired without a 200 from this peer.
+            raise Exception("Max time exceeded waiting for transactions with current response map = {0}".format(respMap))
+    print("Result of request to all peers = {0}".format(respMap))
+    print("")
+
+@then(u'I check the transaction ID if it is "{tUUID}"')
+def step_impl(context, tUUID):
+ assert 'transactionID' in context, "transactionID not found in context"
+ assert context.transactionID == tUUID, "transactionID is not tUUID"
+
+def getContainerDataValuesFromContext(context, aliases, callback):
+    """For each alias, find the first matching container and collect callback(containerData).
+
+    NOTE(review): the step code in this module calls the identically named
+    helper on bdd_test_util rather than this local copy — possibly dead code.
+    """
+    assert 'compose_containers' in context, "compose_containers not found in context"
+    values = []
+    # docker-compose prefixes container names with "<cwd-basename>_".
+    containerNamePrefix = os.path.basename(os.getcwd()) + "_"
+    for namePart in aliases:
+        for containerData in context.compose_containers:
+            if containerData.containerName.startswith(containerNamePrefix + namePart):
+                values.append(callback(containerData))
+                break
+    return values
+
+@then(u'I wait up to "{seconds}" seconds for transaction to be committed to peers')
+def step_impl(context, seconds):
+    # Same polling loop as the "all peers" step above, but restricted to the
+    # peers named in the step table.
+    assert 'transactionID' in context, "transactionID not found in context"
+    assert 'compose_containers' in context, "compose_containers not found in context"
+    assert 'table' in context, "table (of peers) not found in context"
+
+    aliases = context.table.headings
+    containerDataList = bdd_test_util.getContainerDataValuesFromContext(context, aliases, lambda containerData: containerData)
+
+    # Build map of "containerName" : resp.statusCode
+    respMap = {container.containerName:0 for container in containerDataList}
+
+    # Set the max time before stopping attempts
+    maxTime = datetime.now() + timedelta(seconds = int(seconds))
+    for container in containerDataList:
+        ipAddress = container.ipAddress
+        request_url = buildUrl(context, ipAddress, "/transactions/{0}".format(context.transactionID))
+
+        # Loop unless failure or time exceeded
+        while (datetime.now() < maxTime):
+            print("{0} GETing path = {1}".format(currentTime(), request_url))
+            resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
+            if resp.status_code == 404:
+                # Pause then try again
+                respMap[container.containerName] = 404
+                time.sleep(1)
+                continue
+            elif resp.status_code == 200:
+                # Success, continue
+                respMap[container.containerName] = 200
+                break
+            else:
+                raise Exception("Error requesting {0}, returned result code = {1}".format(request_url, resp.status_code))
+        else:
+            # while/else: deadline expired without committing on this peer.
+            raise Exception("Max time exceeded waiting for transactions with current response map = {0}".format(respMap))
+    print("Result of request to all peers = {0}".format(respMap))
+    print("")
+
+
+@then(u'I should get a rejection message in the listener after stopping it')
+def step_impl(context):
+    # Stop the block-listener started earlier and verify exactly one
+    # rejected-transaction message appears in its output.
+    assert "eventlistener" in context, "no eventlistener is started"
+    context.eventlistener.terminate()
+    output = context.eventlistener.stdout.read()
+    rejection = "Received rejected transaction"
+    assert rejection in output, "no rejection message was found"
+    assert output.count(rejection) == 1, "only one rejection message should be found"
+
+
+@when(u'I query chaincode "{chaincodeName}" function name "{functionName}" on all peers')
+def step_impl(context, chaincodeName, functionName):
+    # Send the same query to every container and collect the responses in
+    # context.responses for later JSON assertions.
+    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
+    assert 'compose_containers' in context, "compose_containers not found in context"
+    # Update the chaincodeSpec ctorMsg for invoke
+    args = []
+    if 'table' in context:
+        # There is ctor arguments
+        args = context.table[0].cells
+    context.chaincodeSpec['ctorMsg']['function'] = functionName
+    context.chaincodeSpec['ctorMsg']['args'] = args #context.table[0].cells if ('table' in context) else []
+    # Invoke the POST
+    chaincodeOpPayload = createChaincodeOpPayload("query", context.chaincodeSpec)
+
+    responses = []
+    for container in context.compose_containers:
+        request_url = buildUrl(context, container.ipAddress, "/chaincode")
+        print("{0} POSTing path = {1}".format(currentTime(), request_url))
+        resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeOpPayload), verify=False)
+        assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
+        responses.append(resp)
+    context.responses = responses
+
+@when(u'I unconditionally query chaincode "{chaincodeName}" function name "{functionName}" with value "{value}" on peers')
+def step_impl(context, chaincodeName, functionName, value):
+    # failOnError=False: tolerate non-200 responses (used for negative tests).
+    query_common(context, chaincodeName, functionName, value, False)
+
+@when(u'I query chaincode "{chaincodeName}" function name "{functionName}" with value "{value}" on peers')
+def step_impl(context, chaincodeName, functionName, value):
+    # failOnError=True: any non-200 response fails the step.
+    query_common(context, chaincodeName, functionName, value, True)
+
+def query_common(context, chaincodeName, functionName, value, failOnError):
+    # Query the peers named in the step table, swapping in each peer's own
+    # enrollId (from the credentials step) as the secureContext per request.
+    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
+    assert 'compose_containers' in context, "compose_containers not found in context"
+    assert 'table' in context, "table (of peers) not found in context"
+    assert 'peerToSecretMessage' in context, "peerToSecretMessage map not found in context"
+
+    aliases = context.table.headings
+    containerDataList = bdd_test_util.getContainerDataValuesFromContext(context, aliases, lambda containerData: containerData)
+
+    # Update the chaincodeSpec ctorMsg for invoke
+    context.chaincodeSpec['ctorMsg']['function'] = functionName
+    context.chaincodeSpec['ctorMsg']['args'] = [value]
+    # Invoke the POST
+    # Make deep copy of chaincodeSpec as we will be changing the SecurityContext per call.
+    chaincodeOpPayload = createChaincodeOpPayload("query", copy.deepcopy(context.chaincodeSpec))
+
+    responses = []
+    for container in containerDataList:
+        # Change the SecurityContext per call
+        chaincodeOpPayload['params']["secureContext"] = context.peerToSecretMessage[container.composeService]['enrollId']
+        print("Container {0} enrollID = {1}".format(container.containerName, container.getEnv("CORE_SECURITY_ENROLLID")))
+        request_url = buildUrl(context, container.ipAddress, "/chaincode")
+        print("{0} POSTing path = {1}".format(currentTime(), request_url))
+        resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeOpPayload), timeout=30, verify=False)
+        if failOnError:
+            assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
+        print("RESULT from {0} of chaincode from peer {1}".format(functionName, container.containerName))
+        print(json.dumps(resp.json(), indent = 4))
+        responses.append(resp)
+    context.responses = responses
+
+@then(u'I should get a JSON response from all peers with "{attribute}" = "{expectedValue}"')
+def step_impl(context, attribute, expectedValue):
+    # Assert the (possibly nested) attribute equals expectedValue in every
+    # response collected by the preceding query step.
+    assert 'responses' in context, "responses not found in context"
+    for resp in context.responses:
+        foundValue = getAttributeFromJSON(attribute, resp.json(), "Attribute not found in response (%s)" %(attribute))
+        assert (formatStringToCompare(foundValue) == expectedValue), "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue)
+
+@then(u'I should get a JSON response from peers with "{attribute}" = "{expectedValue}"')
+def step_impl(context, attribute, expectedValue):
+    # NOTE(review): the table of peers is asserted but not used; the check
+    # runs over all collected responses, same as the "all peers" variant.
+    assert 'responses' in context, "responses not found in context"
+    assert 'compose_containers' in context, "compose_containers not found in context"
+    assert 'table' in context, "table (of peers) not found in context"
+
+    for resp in context.responses:
+        foundValue = getAttributeFromJSON(attribute, resp.json(), "Attribute not found in response (%s)" %(attribute))
+        assert (formatStringToCompare(foundValue) == expectedValue), "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue)
+
+@given(u'I register with CA supplying username "{userName}" and secret "{secret}" on peers')
+def step_impl(context, userName, secret):
+    # Register/login the same credentials on every peer named in the step
+    # table, record the user, and patch any existing chaincodeSpec so later
+    # deploys/invokes run as this user.
+    assert 'compose_containers' in context, "compose_containers not found in context"
+    assert 'table' in context, "table (of peers) not found in context"
+
+    # Get list of IPs to login to
+    aliases = context.table.headings
+    containerDataList = bdd_test_util.getContainerDataValuesFromContext(context, aliases, lambda containerData: containerData)
+
+    secretMsg = {
+        "enrollId": userName,
+        "enrollSecret" : secret
+    }
+
+    # Login to each container specified
+    for containerData in containerDataList:
+        request_url = buildUrl(context, containerData.ipAddress, "/registrar")
+        print("{0} POSTing path = {1}".format(currentTime(), request_url))
+
+        resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(secretMsg), verify=False)
+        assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
+        context.response = resp
+        print("message = {0}".format(resp.json()))
+
+        # Create new User entry
+        bdd_test_util.registerUser(context, secretMsg, containerData.composeService)
+
+    # Store the username in the context
+    context.userName = userName
+    # if we already have the chaincodeSpec, change secureContext
+    if 'chaincodeSpec' in context:
+        context.chaincodeSpec["secureContext"] = context.userName
+
+
+@given(u'I use the following credentials for querying peers')
+def step_impl(context):
+    # Log each (peer, username, secret) row in against its peer's /registrar
+    # endpoint and keep the peer -> secretMsg map for query_common to use.
+    assert 'compose_containers' in context, "compose_containers not found in context"
+    assert 'table' in context, "table (of peers, username, secret) not found in context"
+
+    peerToSecretMessage = {}
+
+    # Login to each container specified using username and secret
+    for row in context.table.rows:
+        peer, userName, secret = row['peer'], row['username'], row['secret']
+        secretMsg = {
+            "enrollId": userName,
+            "enrollSecret" : secret
+        }
+
+        ipAddress = bdd_test_util.ipFromContainerNamePart(peer, context.compose_containers)
+        request_url = buildUrl(context, ipAddress, "/registrar")
+        print("POSTing to service = {0}, path = {1}".format(peer, request_url))
+
+        resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(secretMsg), verify=False)
+        assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
+        context.response = resp
+        print("message = {0}".format(resp.json()))
+        peerToSecretMessage[peer] = secretMsg
+    context.peerToSecretMessage = peerToSecretMessage
+
+
+@given(u'I stop peers')
+def step_impl(context):
+    # Stop the services listed in the step table via docker-compose.
+    compose_op(context, "stop")
+
+
+@given(u'I start a listener')
+def step_impl(context):
+    # Launch the pre-built block-listener binary in the background so later
+    # steps can inspect its output for rejected transactions.
+    gopath = os.environ.get('GOPATH')
+    assert gopath is not None, "Please set GOPATH properly!"
+    listener = os.path.join(gopath, "src/github.com/hyperledger/fabric/build/docker/bin/block-listener")
+    assert os.path.isfile(listener), "Please build the block-listener binary!"
+    bdd_test_util.start_background_process(context, "eventlistener", [listener, "-listen-to-rejections"] )
+
+
+@given(u'I start peers')
+def step_impl(context):
+    # Start the services listed in the step table via docker-compose.
+    compose_op(context, "start")
+
+@given(u'I pause peers')
+def step_impl(context):
+    # Pause the services listed in the step table via docker-compose.
+    compose_op(context, "pause")
+
+@given(u'I unpause peers')
+def step_impl(context):
+    # Unpause the services listed in the step table via docker-compose.
+    compose_op(context, "unpause")
+
+def compose_op(context, op):
+ assert 'table' in context, "table (of peers) not found in context"
+ assert 'compose_yaml' in context, "compose_yaml not found in context"
+
+ fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
+ services = context.table.headings
+ # Loop through services and start/stop them, and modify the container data list if successful.
+ for service in services:
+ context.compose_output, context.compose_error, context.compose_returncode = \
+ bdd_test_util.cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + [op, service], expect_success=True)
+ assert context.compose_returncode == 0, "docker-compose failed to {0} {0}".format(op, service)
+ if op == "stop" or op == "pause":
+ context.compose_containers = [containerData for containerData in context.compose_containers if containerData.composeService != service]
+ else:
+ parseComposeOutput(context)
+ print("After {0}ing, the container service list is = {1}".format(op, [containerData.composeService for containerData in context.compose_containers]))
diff --git a/bddtests/steps/peer_logging_impl.py b/bddtests/steps/peer_logging_impl.py
new file mode 100755
index 00000000000..d96fa487bba
--- /dev/null
+++ b/bddtests/steps/peer_logging_impl.py
@@ -0,0 +1,61 @@
+#
+# Copyright IBM Corp. 2016 All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import os.path
+import re
+import time
+import copy
+from datetime import datetime, timedelta
+from behave import *
+
+import sys, requests, json
+
+import bdd_test_util
+
+@then(u'I wait up to {waitTime} seconds for an error in the logs for peer {peerName}')
+def step_impl(context, waitTime, peerName):
+    # Poll the peer's docker logs until an error line appears or the deadline
+    # passes; the step fails if no error was ever observed.
+    timeout = time.time() + float(waitTime)
+    hasError = False
+
+    while timeout > time.time():
+        stdout, stderr = getPeerLogs(context, peerName)
+        hasError = logHasError(stdout) or logHasError(stderr)
+
+        if hasError:
+            break
+
+        time.sleep(1.0)
+
+    assert hasError is True
+
+def getPeerLogs(context, peerName):
+    # Fetch the full docker logs (stdout, stderr) of the named peer container.
+    fullContainerName = bdd_test_util.fullNameFromContainerNamePart(peerName, context.compose_containers)
+    stdout, stderr, retcode = bdd_test_util.cli_call(context, ["docker", "logs", fullContainerName], expect_success=True)
+
+    return stdout, stderr
+
+def logHasError(logText):
+ # This seems to be an acceptable heuristic for detecting errors
+ return logText.find("-> ERRO") >= 0
+
+@then(u'ensure after {waitTime} seconds there are no errors in the logs for peer {peerName}')
+def step_impl(context, waitTime, peerName):
+    # Give the peer `waitTime` seconds to settle, then assert its logs are
+    # free of error markers.
+    time.sleep(float(waitTime))
+    stdout, stderr = getPeerLogs(context, peerName)
+
+    assert logHasError(stdout) is False
+    assert logHasError(stderr) is False
\ No newline at end of file
diff --git a/bddtests/steps/sdk_impl.py b/bddtests/steps/sdk_impl.py
new file mode 100644
index 00000000000..e149cec21c7
--- /dev/null
+++ b/bddtests/steps/sdk_impl.py
@@ -0,0 +1,43 @@
+import os
+import re
+import time
+import copy
+import base64
+from datetime import datetime, timedelta
+
+import sys, requests, json
+
+from behave import *
+
+import bdd_test_util
+
+from grpc.beta import implementations
+
+import fabric_pb2
+import chaincode_pb2
+import devops_pb2
+
+SDK_NODE_APP_REST_PORT = 8080
+
+def buildUrl(context, ipAddress, path):
+    # Build an http(s) URL against the sample SDK app's REST port; the @TLS
+    # feature tag switches the scheme. Distinct from the same-named helper in
+    # the peer step modules, which targets the peer's REST port.
+    schema = "http"
+    if 'TLS' in context.tags:
+        schema = "https"
+    return "{0}://{1}:{2}{3}".format(schema, ipAddress, SDK_NODE_APP_REST_PORT, path)
+
+
+@given(u'I register thru the sample SDK app supplying username "{enrollId}" and secret "{enrollSecret}" on "{composeService}"')
+def step_impl(context, enrollId, enrollSecret, composeService):
+    # NOTE(review): secretMsg is built but never sent; the step only GETs "/"
+    # on the sample app — confirm whether registration was meant to be POSTed.
+    assert 'compose_containers' in context, "compose_containers not found in context"
+
+    # Get the sampleApp IP Address
+    containerDataList = bdd_test_util.getContainerDataValuesFromContext(context, [composeService], lambda containerData: containerData)
+    sampleAppIpAddress = containerDataList[0].ipAddress
+    secretMsg = {
+        "enrollId": enrollId,
+        "enrollSecret" : enrollSecret
+    }
+    request_url = buildUrl(context, sampleAppIpAddress, "/")
+    resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
+    assert resp.status_code == 200, "Failed to GET url %s: %s" % (request_url,resp.text)
+    context.response = resp
+    print("")
+
diff --git a/bddtests/syschaincode/noop/chaincode.go b/bddtests/syschaincode/noop/chaincode.go
new file mode 100644
index 00000000000..3d2204053e9
--- /dev/null
+++ b/bddtests/syschaincode/noop/chaincode.go
@@ -0,0 +1,107 @@
+/*
+ Copyright Digital Asset Holdings, LLC 2016 All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package noop
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/chaincode/shim"
+ ld "github.com/hyperledger/fabric/core/ledger"
+ "github.com/hyperledger/fabric/protos"
+)
+
+var logger = shim.NewLogger("noop")
+
+// ledgerHandler abstracts the single ledger call this chaincode needs, so
+// tests can substitute a mock (see mockLedgerH below).
+type ledgerHandler interface {
+	GetTransactionByUUID(txUUID string) (*protos.Transaction, error)
+}
+
+// SystemChaincode is type representing the chaincode
+// In general, one should not use vars in memory that can hold state
+// across invokes but this is used JUST for MOCKING
+type SystemChaincode struct {
+	// mockLedgerH, when non-nil, replaces the real ledger in getLedger().
+	mockLedgerH ledgerHandler
+}
+
+// getLedger returns the injected mock ledger when one is present; otherwise
+// it fetches the real ledger, panicking if that fails.
+func (t *SystemChaincode) getLedger() ledgerHandler {
+	if t.mockLedgerH != nil {
+		return t.mockLedgerH
+	}
+	lh, err := ld.GetLedger()
+	if err != nil {
+		panic("Chaincode is unable to get the ledger.")
+	}
+	return lh
+}
+
+// Init initializes the system chaincode; noop has no state to set up.
+func (t *SystemChaincode) Init(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {
+	logger.SetLevel(shim.LogDebug)
+	logger.Debugf("NOOP INIT")
+	return nil, nil
+}
+
+// Invoke runs an invocation on the system chaincode. Only "execute" is
+// supported, and it deliberately does nothing beyond validating that a
+// single argument was supplied.
+func (t *SystemChaincode) Invoke(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {
+	switch function {
+	case "execute":
+
+		if len(args) < 1 {
+			return nil, errors.New("execute operation must include single argument, the base64 encoded form of a byte sequence")
+		}
+		logger.Infof("Executing NOOP INVOKE")
+		return nil, nil
+
+	default:
+		return nil, errors.New("Unsupported operation")
+	}
+}
+
+// Query callback representing the query of a chaincode. "getTran" looks up a
+// transaction by UUID, unmarshals its payload as a ChaincodeInvocationSpec,
+// and returns the first ctor argument decoded from base64.
+func (t *SystemChaincode) Query(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {
+	switch function {
+	case "getTran":
+		if len(args) < 1 {
+			return nil, errors.New("getTran operation must include a single argument, the TX hash hex")
+		}
+		logger.Infof("Executing NOOP QUERY")
+		logger.Infof("--> %x", args[0])
+
+		var txHashHex = args[0]
+		var tx, txerr = t.getLedger().GetTransactionByUUID(txHashHex)
+		// NOTE(review): when the ledger returns (nil, nil) this yields
+		// (nil, nil) — a silent miss rather than an error; confirm intended.
+		if nil != txerr || nil == tx {
+			return nil, txerr
+		}
+		newCCIS := &protos.ChaincodeInvocationSpec{}
+		var merr = proto.Unmarshal(tx.Payload, newCCIS)
+		if nil != merr {
+			return nil, merr
+		}
+		var data = newCCIS.ChaincodeSpec.CtorMsg.Args[0]
+		var dataInByteForm, b64err = base64.StdEncoding.DecodeString(data)
+		if b64err != nil {
+			return nil, fmt.Errorf("Error in decoding from Base64: %s", b64err)
+		}
+		return dataInByteForm, nil
+	default:
+		return nil, errors.New("Unsupported operation")
+	}
+}
diff --git a/bddtests/syschaincode/noop/chaincode_test.go b/bddtests/syschaincode/noop/chaincode_test.go
new file mode 100644
index 00000000000..13d46866eb5
--- /dev/null
+++ b/bddtests/syschaincode/noop/chaincode_test.go
@@ -0,0 +1,121 @@
+/*
+ Copyright Digital Asset Holdings, LLC 2016 All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package noop
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/protos"
+)
+
+var something = "c29tZXRoaW5n"
+
+func TestMocking(t *testing.T) {
+ var mockledger, ledger ledgerHandler
+ mockledger = mockLedger{}
+ var noop = SystemChaincode{mockledger}
+ ledger = noop.getLedger()
+ if mockledger != ledger {
+ t.Errorf("Mocking functionality of Noop system chaincode does not work.")
+ }
+}
+
+func TestInvokeUnsupported(t *testing.T) {
+ var noop = SystemChaincode{mockLedger{}}
+ var res, err = noop.Invoke(nil, "unsupported_operation", []string{"arg1", "arg2"})
+ if res != nil || err == nil {
+ t.Errorf("Invoke has to return nil and error when called with unsupported operation!")
+ }
+}
+
+func TestInvokeExecuteNotEnoughArgs(t *testing.T) {
+ var noop = SystemChaincode{mockLedger{}}
+ var res, err = noop.Invoke(nil, "execute", []string{})
+ if res != nil || err == nil {
+ t.Errorf("Invoke.execute has to indicate error if called with less than one arguments!")
+ }
+}
+
+func TestInvokeExecuteOneArgReturnsNothing(t *testing.T) {
+ var noop = SystemChaincode{mockLedger{}}
+ var res, err = noop.Invoke(nil, "execute", []string{"arg1"})
+ if res != nil || err != nil {
+ t.Errorf("Invoke.execute has to return nil with no error.")
+ }
+}
+
+func TestInvokeExecuteMoreArgsReturnsNothing(t *testing.T) {
+ var noop = SystemChaincode{mockLedger{}}
+ var res, err = noop.Invoke(nil, "execute", []string{"arg1", "arg2"})
+ if res != nil || err != nil {
+ t.Errorf("Invoke.execute has to return nil with no error.")
+ }
+}
+
+func TestQueryUnsupported(t *testing.T) {
+ var noop = SystemChaincode{mockLedger{}}
+ var res, err = noop.Query(nil, "unsupported_operation", []string{"arg1", "arg2"})
+ if res != nil || err == nil {
+ t.Errorf("Invoke has to return nil and error when called with unsupported operation!")
+ }
+}
+
+func TestQueryGetTranNotEnoughArgs(t *testing.T) {
+ var noop = SystemChaincode{mockLedger{}}
+ var res, err = noop.Query(nil, "getTran", []string{})
+ if res != nil || err == nil {
+ t.Errorf("Invoke has to return nil and error when called with unsupported operation!")
+ }
+}
+
+func TestQueryGetTranNonExisting(t *testing.T) {
+ var noop = SystemChaincode{mockLedger{}}
+ var res, err = noop.Query(nil, "getTran", []string{"noSuchTX"})
+ if res != nil || err == nil {
+ t.Errorf("Invoke has to return nil when called with a non-existent transaction.")
+ }
+}
+
+func TestQueryGetTranNonExistingWithManyArgs(t *testing.T) {
+ var noop = SystemChaincode{mockLedger{}}
+ var res, err = noop.Query(nil, "getTran", []string{"noSuchTX", "arg2"})
+ if res != nil || err == nil {
+ t.Errorf("Invoke has to return nil when called with a non-existent transaction.")
+ }
+}
+
+func TestQueryGetTranExisting(t *testing.T) {
+ var noop = SystemChaincode{mockLedger{}}
+ var res, err = noop.Query(nil, "getTran", []string{"someTx"})
+ if res == nil || err != nil {
+ t.Errorf("Invoke has to return a transaction when called with an existing one.")
+ }
+}
+
+type mockLedger struct {
+}
+
+func (ml mockLedger) GetTransactionByUUID(txUUID string) (*protos.Transaction, error) {
+ if txUUID == "noSuchTX" {
+ return nil, fmt.Errorf("Some error")
+ }
+ newCCIS := &protos.ChaincodeInvocationSpec{ChaincodeSpec: &protos.ChaincodeSpec{CtorMsg: &protos.ChaincodeInput{Function: "execute", Args: []string{something}}}}
+ pl, _ := proto.Marshal(newCCIS)
+ return &protos.Transaction{Payload: pl}, nil
+}
diff --git a/bddtests/tlsca.cert b/bddtests/tlsca.cert
new file mode 100644
index 00000000000..990911284de
--- /dev/null
+++ b/bddtests/tlsca.cert
@@ -0,0 +1,12 @@
+-----BEGIN CERTIFICATE-----
+MIIBwTCCAUegAwIBAgIBATAKBggqhkjOPQQDAzApMQswCQYDVQQGEwJVUzEMMAoG
+A1UEChMDSUJNMQwwCgYDVQQDEwNPQkMwHhcNMTYwMTIxMjI0OTUxWhcNMTYwNDIw
+MjI0OTUxWjApMQswCQYDVQQGEwJVUzEMMAoGA1UEChMDSUJNMQwwCgYDVQQDEwNP
+QkMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAR6YAoPOwMzIVi+P83V79I6BeIyJeaM
+meqWbmwQsTRlKD6g0L0YvczQO2vp+DbxRN11okGq3O/ctcPzvPXvm7Mcbb3whgXW
+RjbsX6wn25tF2/hU6fQsyQLPiJuNj/yxknSjQzBBMA4GA1UdDwEB/wQEAwIChDAP
+BgNVHRMBAf8EBTADAQH/MA0GA1UdDgQGBAQBAgMEMA8GA1UdIwQIMAaABAECAwQw
+CgYIKoZIzj0EAwMDaAAwZQIxAITGmq+x5N7Q1jrLt3QFRtTKsuNIosnlV4LR54l3
+yyDo17Ts0YLyC0pZQFd+GURSOQIwP/XAwoMcbJJtOVeW/UL2EOqmKA2ygmWX5kte
+9Lngf550S6gPEWuDQOcY95B+x3eH
+-----END CERTIFICATE-----
diff --git a/bddtests/tlsca.priv b/bddtests/tlsca.priv
new file mode 100644
index 00000000000..3652958387e
--- /dev/null
+++ b/bddtests/tlsca.priv
@@ -0,0 +1,6 @@
+-----BEGIN ECDSA PRIVATE KEY-----
+MIGkAgEBBDDSK85W5GPJ4WVYV/6I8NQuwXswMvoNJ/FzKjCgdWLAfcvYM4jO/rIo
+ytwrwphFijigBwYFK4EEACKhZANiAAR6YAoPOwMzIVi+P83V79I6BeIyJeaMmeqW
+bmwQsTRlKD6g0L0YvczQO2vp+DbxRN11okGq3O/ctcPzvPXvm7Mcbb3whgXWRjbs
+X6wn25tF2/hU6fQsyQLPiJuNj/yxknQ=
+-----END ECDSA PRIVATE KEY-----
diff --git a/bddtests/utxo.feature b/bddtests/utxo.feature
new file mode 100644
index 00000000000..3d19954a4e3
--- /dev/null
+++ b/bddtests/utxo.feature
@@ -0,0 +1,45 @@
+#
+# Test openchain Peers
+#
+# Tags that can be used and will affect test internals:
+#
+# @doNotDecompose will NOT decompose the named compose_yaml after scenario ends. Useful for setting up environment and reviewing after scenario.
+#
+# @chaincodeImagesUpToDate use this if all scenarios chaincode images are up to date, and do NOT require building. BE SURE!!!
+
+#@chaincodeImagesUpToDate
+Feature: utxo
+ As an openchain developer
+ I want to be able to launch a 3 peers
+
+ #@doNotDecompose
+ @wip
+ @issueUtxo
+ Scenario: UTXO chaincode test
+ Given we compose "docker-compose-1.yml"
+ And I wait "1" seconds
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "1"
+ When I deploy chaincode "github.com/openblockchain/obc-peer/examples/chaincode/go/utxo" with ctor "init" to "vp0"
+ ||
+ ||
+
+ Then I should have received a chaincode name
+ Then I wait up to "60" seconds for transaction to be committed to all peers
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "2"
+
+ When I invoke chaincode "map" function name "execute" on "vp0"
+ | arg1 |
+ | AQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP////9NBP//AB0BBEVUaGUgVGltZXMgMDMvSmFuLzIwMDkgQ2hhbmNlbGxvciBvbiBicmluayBvZiBzZWNvbmQgYmFpbG91dCBmb3IgYmFua3P/////AQDyBSoBAAAAQ0EEZ4r9sP5VSCcZZ/GmcTC3EFzWqCjgOQmmeWLg6h9h3rZJ9rw/TO84xPNVBOUewRLeXDhN97oLjVeKTHAra/EdX6wAAAAA |
+ Then I should have received a transactionID
+ Then I wait up to "25" seconds for transaction to be committed to all peers
+
+ When requesting "/chain" from "vp0"
+ Then I should get a JSON response with "height" = "3"
+
+ # When I query chaincode "map" function name "get" on "vp0":
+ # | arg1|
+ # | key1 |
+ # Then I should get a JSON response with "OK" = "value1"
diff --git a/consensus/consensus.go b/consensus/consensus.go
new file mode 100644
index 00000000000..0eb66c2058e
--- /dev/null
+++ b/consensus/consensus.go
@@ -0,0 +1,115 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package consensus
+
+import (
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// ExecutionConsumer allows callbacks from asynchronous execution and state transfer
+type ExecutionConsumer interface {
+ Executed(tag interface{}) // Called whenever Execute completes
+ Committed(tag interface{}, target *pb.BlockchainInfo) // Called whenever Commit completes
+ RolledBack(tag interface{}) // Called whenever a Rollback completes
+ StateUpdated(tag interface{}, target *pb.BlockchainInfo) // Called when state transfer completes, if target is nil, this indicates a failure and a new target should be supplied
+}
+
+// Consenter is used to receive messages from the network
+// Every consensus plugin needs to implement this interface
+type Consenter interface {
+ RecvMsg(msg *pb.Message, senderHandle *pb.PeerID) error // Called serially with incoming messages from gRPC
+ ExecutionConsumer
+}
+
+// Inquirer is used to retrieve info about the validating network
+type Inquirer interface {
+ GetNetworkInfo() (self *pb.PeerEndpoint, network []*pb.PeerEndpoint, err error)
+ GetNetworkHandles() (self *pb.PeerID, network []*pb.PeerID, err error)
+}
+
+// Communicator is used to send messages to other validators
+type Communicator interface {
+ Broadcast(msg *pb.Message, peerType pb.PeerEndpoint_Type) error
+ Unicast(msg *pb.Message, receiverHandle *pb.PeerID) error
+}
+
+// NetworkStack is used to retrieve network info and send messages
+type NetworkStack interface {
+ Communicator
+ Inquirer
+}
+
+// SecurityUtils is used to access the sign/verify methods from the crypto package
+type SecurityUtils interface {
+ Sign(msg []byte) ([]byte, error)
+ Verify(peerID *pb.PeerID, signature []byte, message []byte) error
+}
+
+// ReadOnlyLedger is used for interrogating the blockchain
+type ReadOnlyLedger interface {
+ GetBlock(id uint64) (block *pb.Block, err error)
+ GetBlockchainSize() uint64
+ GetBlockchainInfo() *pb.BlockchainInfo
+ GetBlockchainInfoBlob() []byte
+ GetBlockHeadMetadata() ([]byte, error)
+}
+
+// LegacyExecutor is used to invoke transactions, potentially modifying the backing ledger
+type LegacyExecutor interface {
+ BeginTxBatch(id interface{}) error
+ ExecTxs(id interface{}, txs []*pb.Transaction) ([]byte, error)
+ CommitTxBatch(id interface{}, metadata []byte) (*pb.Block, error)
+ RollbackTxBatch(id interface{}) error
+ PreviewCommitTxBatch(id interface{}, metadata []byte) ([]byte, error)
+}
+
+// Executor is intended to eventually supplant the old Executor interface
+// The problem with invoking the calls directly above, is that they must be coordinated
+// with state transfer, to eliminate possible races and ledger corruption
+type Executor interface {
+ Start() // Bring up the resources needed to use this interface
+ Halt() // Tear down the resources needed to use this interface
+ Execute(tag interface{}, txs []*pb.Transaction) // Executes a set of transactions, this may be called in succession
+ Commit(tag interface{}, metadata []byte) // Commits whatever transactions have been executed
+ Rollback(tag interface{}) // Rolls back whatever transactions have been executed
+ UpdateState(tag interface{}, target *pb.BlockchainInfo, peers []*pb.PeerID) // Attempts to synchronize state to a particular target, implicitly calls rollback if needed
+}
+
+// LedgerManager is used to manipulate the state of the ledger
+type LedgerManager interface {
+ InvalidateState() // Invalidate informs the ledger that it is out of date and should reject queries
+ ValidateState() // Validate informs the ledger that it is back up to date and should resume replying to queries
+}
+
+// StatePersistor is used to store consensus state which should survive a process crash
+type StatePersistor interface {
+ StoreState(key string, value []byte) error
+ ReadState(key string) ([]byte, error)
+ ReadStateSet(prefix string) (map[string][]byte, error)
+ DelState(key string)
+}
+
+// Stack is the set of stack-facing methods available to the consensus plugin
+type Stack interface {
+ NetworkStack
+ SecurityUtils
+ Executor
+ LegacyExecutor
+ LedgerManager
+ ReadOnlyLedger
+ StatePersistor
+}
diff --git a/consensus/controller/controller.go b/consensus/controller/controller.go
new file mode 100644
index 00000000000..c869cb6d883
--- /dev/null
+++ b/consensus/controller/controller.go
@@ -0,0 +1,48 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "strings"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+
+ "github.com/hyperledger/fabric/consensus"
+ "github.com/hyperledger/fabric/consensus/noops"
+ "github.com/hyperledger/fabric/consensus/pbft"
+)
+
+var logger *logging.Logger // package-level logger
+var consenter consensus.Consenter
+
+func init() {
+ logger = logging.MustGetLogger("consensus/controller")
+}
+
+// NewConsenter constructs a Consenter object if not already present
+func NewConsenter(stack consensus.Stack) consensus.Consenter {
+
+ plugin := strings.ToLower(viper.GetString("peer.validator.consensus.plugin"))
+ if plugin == "pbft" {
+ logger.Infof("Creating consensus plugin %s", plugin)
+ return pbft.GetPlugin(stack)
+ }
+ logger.Info("Creating default consensus plugin (noops)")
+ return noops.GetNoops(stack)
+
+}
diff --git a/consensus/executor/executor.go b/consensus/executor/executor.go
new file mode 100644
index 00000000000..bab2fa2f8e4
--- /dev/null
+++ b/consensus/executor/executor.go
@@ -0,0 +1,206 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package executor
+
+import (
+ "github.com/hyperledger/fabric/consensus"
+ "github.com/hyperledger/fabric/consensus/util/events"
+ "github.com/hyperledger/fabric/core/peer/statetransfer"
+ pb "github.com/hyperledger/fabric/protos"
+
+ "github.com/op/go-logging"
+)
+
+var logger *logging.Logger // package-level logger
+
+func init() {
+ logger = logging.MustGetLogger("consensus/executor")
+}
+
+// PartialStack contains the ledger features required by the executor.Coordinator
+type PartialStack interface {
+ consensus.LegacyExecutor
+ GetBlockchainInfo() *pb.BlockchainInfo
+}
+
+type coordinatorImpl struct {
+ manager events.Manager // Maintains event thread and sends events to the coordinator
+ rawExecutor PartialStack // Does the real interaction with the ledger
+ consumer consensus.ExecutionConsumer // The consumer of this coordinator which receives the callbacks
+ stc statetransfer.Coordinator // State transfer instance
+ batchInProgress bool // Are we mid execution batch
+ skipInProgress bool // Are we mid state transfer
+}
+
+// NewImpl creates a new executor.Coordinator
+func NewImpl(consumer consensus.ExecutionConsumer, rawExecutor PartialStack, stps statetransfer.PartialStack) consensus.Executor {
+ co := &coordinatorImpl{
+ rawExecutor: rawExecutor,
+ consumer: consumer,
+ stc: statetransfer.NewCoordinatorImpl(stps),
+ manager: events.NewManagerImpl(),
+ }
+ co.manager.SetReceiver(co)
+ return co
+}
+
+// ProcessEvent is the main event loop for the executor.Coordinator
+func (co *coordinatorImpl) ProcessEvent(event events.Event) events.Event {
+ switch et := event.(type) {
+ case executeEvent:
+ logger.Debug("Executor is processing an executeEvent")
+ if co.skipInProgress {
+ logger.Error("FATAL programming error, attempted to execute a transaction during state transfer")
+ return nil
+ }
+
+ if !co.batchInProgress {
+ logger.Debug("Starting new transaction batch")
+ co.batchInProgress = true
+ err := co.rawExecutor.BeginTxBatch(co)
+ _ = err // TODO This should probably panic, see issue 752
+ }
+
+ co.rawExecutor.ExecTxs(co, et.txs)
+
+ co.consumer.Executed(et.tag)
+ case commitEvent:
+ logger.Debug("Executor is processing an commitEvent")
+ if co.skipInProgress {
+ logger.Error("Likely FATAL programming error, attempted to commit a transaction batch during state transfer")
+ return nil
+ }
+
+ if !co.batchInProgress {
+ logger.Error("Likely FATAL programming error, attemted to commit a transaction batch when one does not exist")
+ return nil
+ }
+
+ _, err := co.rawExecutor.CommitTxBatch(co, et.metadata)
+ _ = err // TODO This should probably panic, see issue 752
+
+ co.batchInProgress = false
+
+ info := co.rawExecutor.GetBlockchainInfo()
+
+ logger.Debugf("Committed block %d with hash %x to chain", info.Height-1, info.CurrentBlockHash)
+
+ co.consumer.Committed(et.tag, info)
+ case rollbackEvent:
+ logger.Debug("Executor is processing an rollbackEvent")
+ if co.skipInProgress {
+ logger.Error("Programming error, attempted to rollback a transaction batch during state transfer")
+ return nil
+ }
+
+ if !co.batchInProgress {
+ logger.Error("Programming error, attempted to rollback a transaction batch which had not started")
+ return nil
+ }
+
+ err := co.rawExecutor.RollbackTxBatch(co)
+ _ = err // TODO This should probably panic, see issue 752
+
+ co.batchInProgress = false
+
+ co.consumer.RolledBack(et.tag)
+ case stateUpdateEvent:
+ logger.Debug("Executor is processing a stateUpdateEvent")
+ if co.batchInProgress {
+ err := co.rawExecutor.RollbackTxBatch(co)
+ _ = err // TODO This should probably panic, see issue 752
+ }
+
+ co.skipInProgress = true
+
+ info := et.blockchainInfo
+ for {
+ err, recoverable := co.stc.SyncToTarget(info.Height-1, info.CurrentBlockHash, et.peers)
+ if err == nil {
+ co.skipInProgress = false
+ co.consumer.StateUpdated(et.tag, info)
+ return nil
+ }
+ if !recoverable {
+ logger.Warningf("State transfer failed irrecoverably, calling back to consumer: %s", err)
+ co.consumer.StateUpdated(et.tag, nil)
+ return nil
+ }
+ logger.Warningf("State transfer did not complete successfully but is recoverable, trying again: %s", err)
+ et.peers = nil // Broaden the peers included in recover to all connected
+ }
+ default:
+ logger.Errorf("Unknown event type %s", et)
+ }
+
+ return nil
+}
+
+// Commit commits whatever outstanding requests have been executed, it is an error to call this without pending executions
+func (co *coordinatorImpl) Commit(tag interface{}, metadata []byte) {
+ co.manager.Queue() <- commitEvent{tag, metadata}
+}
+
+// Execute adds additional executions to the current batch
+func (co *coordinatorImpl) Execute(tag interface{}, txs []*pb.Transaction) {
+ co.manager.Queue() <- executeEvent{tag, txs}
+}
+
+// Rollback rolls back the executions from the current batch
+func (co *coordinatorImpl) Rollback(tag interface{}) {
+ co.manager.Queue() <- rollbackEvent{tag}
+}
+
+// UpdateState uses the state transfer subsystem to attempt to progress to a target
+func (co *coordinatorImpl) UpdateState(tag interface{}, info *pb.BlockchainInfo, peers []*pb.PeerID) {
+ co.manager.Queue() <- stateUpdateEvent{tag, info, peers}
+}
+
+// Start must be called before utilizing the Coordinator
+func (co *coordinatorImpl) Start() {
+ co.stc.Start()
+ co.manager.Start()
+}
+
+// Halt should be called to clean up resources allocated by the Coordinator
+func (co *coordinatorImpl) Halt() {
+ co.stc.Stop()
+ co.manager.Halt()
+}
+
+// Event types
+
+type executeEvent struct {
+ tag interface{}
+ txs []*pb.Transaction
+}
+
+// Note, this cannot be a simple type alias, in case tag is nil
+type rollbackEvent struct {
+ tag interface{}
+}
+
+type commitEvent struct {
+ tag interface{}
+ metadata []byte
+}
+
+type stateUpdateEvent struct {
+ tag interface{}
+ blockchainInfo *pb.BlockchainInfo
+ peers []*pb.PeerID
+}
diff --git a/consensus/executor/executor_test.go b/consensus/executor/executor_test.go
new file mode 100644
index 00000000000..06a34caffea
--- /dev/null
+++ b/consensus/executor/executor_test.go
@@ -0,0 +1,522 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package executor
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/hyperledger/fabric/consensus/util/events"
+
+ pb "github.com/hyperledger/fabric/protos"
+
+ "github.com/op/go-logging"
+)
+
+func init() {
+ logging.SetLevel(logging.DEBUG, "")
+}
+
+// -------------------------
+//
+// Mock consumer
+//
+// -------------------------
+
+type mockConsumer struct {
+ ExecutedImpl func(tag interface{}) // Called whenever Execute completes
+ CommittedImpl func(tag interface{}, target *pb.BlockchainInfo) // Called whenever Commit completes
+ RolledBackImpl func(tag interface{}) // Called whenever a Rollback completes
+ StateUpdatedImpl func(tag interface{}, target *pb.BlockchainInfo) // Called when state transfer completes, if target is nil, this indicates a failure and a new target should be supplied
+}
+
+func (mock *mockConsumer) Executed(tag interface{}) {
+ if mock.ExecutedImpl != nil {
+ mock.ExecutedImpl(tag)
+ }
+}
+
+func (mock *mockConsumer) Committed(tag interface{}, target *pb.BlockchainInfo) {
+ if mock.CommittedImpl != nil {
+ mock.CommittedImpl(tag, target)
+ }
+}
+
+func (mock *mockConsumer) RolledBack(tag interface{}) {
+ if mock.RolledBackImpl != nil {
+ mock.RolledBackImpl(tag)
+ }
+}
+
+func (mock *mockConsumer) StateUpdated(tag interface{}, target *pb.BlockchainInfo) {
+ if mock.StateUpdatedImpl != nil {
+ mock.StateUpdatedImpl(tag, target)
+ }
+}
+
+// -------------------------
+//
+// Mock rawExecutor
+//
+// -------------------------
+
+type mockRawExecutor struct {
+ t *testing.T
+ curBatch interface{}
+ curTxs []*pb.Transaction
+ commitCount uint64
+}
+
+func (mock *mockRawExecutor) BeginTxBatch(id interface{}) error {
+ if mock.curBatch != nil {
+ e := fmt.Errorf("Attempted to start a new batch without stopping the other")
+ mock.t.Fatal(e)
+ return e
+ }
+ mock.curBatch = id
+ return nil
+}
+
+func (mock *mockRawExecutor) ExecTxs(id interface{}, txs []*pb.Transaction) ([]byte, error) {
+ if mock.curBatch != id {
+ e := fmt.Errorf("Attempted to exec on a different batch")
+ mock.t.Fatal(e)
+ return nil, e
+ }
+ mock.curTxs = append(mock.curTxs, txs...)
+ return nil, nil
+}
+
+func (mock *mockRawExecutor) CommitTxBatch(id interface{}, meta []byte) (*pb.Block, error) {
+ if mock.curBatch != id {
+ e := fmt.Errorf("Attempted to commit a batch which doesn't exist")
+ mock.t.Fatal(e)
+ return nil, e
+ }
+ mock.commitCount++
+ return nil, nil
+}
+
+func (mock *mockRawExecutor) RollbackTxBatch(id interface{}) error {
+ if mock.curBatch == nil {
+ e := fmt.Errorf("Attempted to rollback a batch which doesn't exist")
+ mock.t.Fatal(e)
+ return e
+ }
+
+ mock.curTxs = nil
+ mock.curBatch = nil
+
+ return nil
+}
+
+func (mock *mockRawExecutor) PreviewCommitTxBatch(id interface{}, meta []byte) ([]byte, error) {
+ if mock.curBatch != nil {
+ e := fmt.Errorf("Attempted to preview a batch which doesn't exist")
+ mock.t.Fatal(e)
+ return nil, e
+ }
+
+ return nil, nil
+}
+
+func (mock *mockRawExecutor) GetBlockchainInfo() *pb.BlockchainInfo {
+ return &pb.BlockchainInfo{
+ Height: mock.commitCount,
+ CurrentBlockHash: []byte(fmt.Sprintf("%d", mock.commitCount)),
+ PreviousBlockHash: []byte(fmt.Sprintf("%d", mock.commitCount-1)),
+ }
+}
+
+// -------------------------
+//
+// Mock stateTransfer
+//
+// -------------------------
+
+type mockStateTransfer struct {
+ StartImpl func()
+ StopImpl func()
+ SyncToTargetImpl func(blockNumber uint64, blockHash []byte, peerIDs []*pb.PeerID) (error, bool)
+}
+
+func (mock *mockStateTransfer) Start() {}
+func (mock *mockStateTransfer) Stop() {}
+
+func (mock *mockStateTransfer) SyncToTarget(blockNumber uint64, blockHash []byte, peerIDs []*pb.PeerID) (error, bool) {
+ if mock.SyncToTargetImpl != nil {
+ return mock.SyncToTargetImpl(blockNumber, blockHash, peerIDs)
+ }
+ return nil, false
+}
+
+// -------------------------
+//
+// Mock event manager
+//
+// -------------------------
+
+type mockEventManager struct {
+ target events.Receiver
+ bufferedChannel chan events.Event // This is buffered so that queueing never blocks
+}
+
+func (mock *mockEventManager) Start() {}
+
+func (mock *mockEventManager) Halt() {}
+
+func (mock *mockEventManager) Inject(event events.Event) {}
+
+func (mock *mockEventManager) SetReceiver(receiver events.Receiver) {
+ mock.target = receiver
+}
+
+func (mock *mockEventManager) Queue() chan<- events.Event {
+ return mock.bufferedChannel
+}
+
+func (mock *mockEventManager) process() {
+ for {
+ select {
+ case ev := <-mock.bufferedChannel:
+ events.SendEvent(mock.target, ev)
+ default:
+ return
+ }
+ }
+}
+
+// -------------------------
+//
+// Util functions
+//
+// -------------------------
+
+func newMocks(t *testing.T) (*coordinatorImpl, *mockConsumer, *mockRawExecutor, *mockStateTransfer, *mockEventManager) {
+ mc := &mockConsumer{}
+ mre := &mockRawExecutor{t: t}
+ mst := &mockStateTransfer{}
+ mev := &mockEventManager{bufferedChannel: make(chan events.Event, 100)}
+ co := &coordinatorImpl{
+ consumer: mc,
+ rawExecutor: mre,
+ stc: mst,
+ manager: mev,
+ }
+ mev.target = co
+ return co, mc, mre, mst, mev
+}
+
+// -------------------------
+//
+// Actual Tests
+//
+// -------------------------
+
+// TestNormalExecutes executes 50 transactions, then commits, ensuring that the callbacks are called appropriately
+func TestNormalExecutes(t *testing.T) {
+ co, mc, _, _, mev := newMocks(t)
+
+ times := uint64(50)
+
+ id := struct{}{}
+ testTxs := []*pb.Transaction{&pb.Transaction{}, &pb.Transaction{}, &pb.Transaction{}}
+
+ executed := uint64(0)
+ mc.ExecutedImpl = func(tag interface{}) {
+ if tag != id {
+ t.Fatalf("Executed got wrong ID")
+ }
+ executed++
+ }
+
+ committed := false
+ mc.CommittedImpl = func(tag interface{}, info *pb.BlockchainInfo) {
+ if tag != id {
+ t.Fatalf("Committed got wrong ID")
+ }
+ committed = true
+ if info.Height != 1 {
+ t.Fatalf("Blockchain info should have returned height of %d, returned %d", 1, info.Height)
+ }
+ }
+
+ for i := uint64(0); i < times; i++ {
+ co.Execute(id, testTxs)
+ }
+
+ co.Commit(id, nil)
+ mev.process()
+
+ if executed != times {
+ t.Fatalf("Should have executed %d times but executed %d times", times, executed)
+ }
+
+ if !committed {
+ t.Fatalf("Should have committed")
+ }
+}
+
+// TestRollbackExecutes executes 5 transactions, then rolls back, executes 5 more and commits, ensuring that the callbacks are called appropriately
+func TestRollbackExecutes(t *testing.T) {
+ co, mc, _, _, mev := newMocks(t)
+
+ times := uint64(5)
+
+ id := struct{}{}
+ testTxs := []*pb.Transaction{&pb.Transaction{}, &pb.Transaction{}, &pb.Transaction{}}
+
+ executed := uint64(0)
+ mc.ExecutedImpl = func(tag interface{}) {
+ if tag != id {
+ t.Fatalf("Executed got wrong ID")
+ }
+ executed++
+ }
+
+ committed := false
+ mc.CommittedImpl = func(tag interface{}, info *pb.BlockchainInfo) {
+ if tag != id {
+ t.Fatalf("Committed got wrong ID")
+ }
+ committed = true
+ if info.Height != 1 {
+ t.Fatalf("Blockchain info should have returned height of %d, returned %d", 1, info.Height)
+ }
+ }
+
+ rolledBack := false
+ mc.RolledBackImpl = func(tag interface{}) {
+ if tag != id {
+ t.Fatalf("RolledBack got wrong ID")
+ }
+ rolledBack = true
+ }
+
+ for i := uint64(0); i < times; i++ {
+ co.Execute(id, testTxs)
+ }
+
+ co.Rollback(id)
+ mev.process()
+
+ if !rolledBack {
+ t.Fatalf("Should have rolled back")
+ }
+
+ for i := uint64(0); i < times; i++ {
+ co.Execute(id, testTxs)
+ }
+ co.Commit(id, nil)
+ mev.process()
+
+ if executed != 2*times {
+ t.Fatalf("Should have executed %d times but executed %d times", 2*times, executed)
+ }
+
+ if !committed {
+ t.Fatalf("Should have committed")
+ }
+}
+
+// TestEmptyCommit attempts to commit without executing any transactions, this is considered a fatal error and no callback should occur
+func TestEmptyCommit(t *testing.T) {
+ co, mc, _, _, mev := newMocks(t)
+
+ mc.CommittedImpl = func(tag interface{}, info *pb.BlockchainInfo) {
+ t.Fatalf("Should not have committed")
+ }
+
+ co.Commit(nil, nil)
+ mev.process()
+}
+
+// TestEmptyRollback attempts a rollback without executing any transactions, this is considered an error and no callback should occur
+func TestEmptyRollback(t *testing.T) {
+ co, mc, _, _, mev := newMocks(t)
+
+ mc.RolledBackImpl = func(tag interface{}) {
+ t.Fatalf("Should not have committed")
+ }
+
+ co.Rollback(nil)
+ mev.process()
+}
+
+// TestNormalStateTransfer attempts a simple state transfer request with 10 recoverable failures
+func TestNormalStateTransfer(t *testing.T) {
+ co, mc, _, mst, mev := newMocks(t)
+ //co, mc, mre, mst, mev := newMocks(t)
+
+ id := struct{}{}
+ blockNumber := uint64(2389)
+ blockHash := []byte("BlockHash")
+
+ stateUpdated := false
+ mc.StateUpdatedImpl = func(tag interface{}, info *pb.BlockchainInfo) {
+ if id != tag {
+ t.Fatalf("Incorrect tag received")
+ }
+ if stateUpdated {
+ t.Fatalf("State should only be updated once")
+ }
+ if info.Height != blockNumber+1 {
+ t.Fatalf("Final height should have been %d", blockNumber+1)
+ }
+ if !bytes.Equal(info.CurrentBlockHash, blockHash) {
+ t.Fatalf("Final height should have been %d", blockNumber+1)
+ }
+ stateUpdated = true
+ }
+
+ count := 0
+ mst.SyncToTargetImpl = func(bn uint64, bh []byte, ps []*pb.PeerID) (error, bool) {
+ count++
+ if count <= 10 {
+ return fmt.Errorf("Transient state transfer error"), true
+ }
+
+ return nil, true
+ }
+
+ co.UpdateState(id, &pb.BlockchainInfo{Height: blockNumber + 1, CurrentBlockHash: blockHash}, nil)
+ mev.process()
+
+ if !stateUpdated {
+ t.Fatalf("State should have been updated")
+ }
+}
+
+// TestFailingStateTransfer attempts a failing simple state transfer request with 10 recoverable failures, then a fatal error, then a success
+func TestFailingStateTransfer(t *testing.T) {
+ co, mc, _, mst, mev := newMocks(t)
+ //co, mc, mre, mst, mev := newMocks(t)
+
+ id := struct{}{}
+ blockNumber1 := uint64(1)
+ blockHash1 := []byte("BlockHash1")
+ blockNumber2 := uint64(2)
+ blockHash2 := []byte("BlockHash2")
+
+ stateUpdated := false
+ mc.StateUpdatedImpl = func(tag interface{}, info *pb.BlockchainInfo) {
+ if id != tag {
+ t.Fatalf("Incorrect tag received")
+ }
+ if stateUpdated {
+ t.Fatalf("State should only be updated once")
+ }
+ if info == nil {
+ return
+ }
+ if info.Height != blockNumber2+1 {
+ t.Fatalf("Final height should have been %d", blockNumber2+1)
+ }
+ if !bytes.Equal(info.CurrentBlockHash, blockHash2) {
+ t.Fatalf("Final height should have been %d", blockNumber2+1)
+ }
+ stateUpdated = true
+ }
+
+ count := 0
+ mst.SyncToTargetImpl = func(bn uint64, bh []byte, ps []*pb.PeerID) (error, bool) {
+ count++
+ if count <= 10 {
+ return fmt.Errorf("Transient state transfer error"), true
+ }
+
+ if bn == blockNumber1 {
+ return fmt.Errorf("Irrecoverable state transfer error"), false
+ }
+
+ return nil, true
+ }
+
+ co.UpdateState(id, &pb.BlockchainInfo{Height: blockNumber1 + 1, CurrentBlockHash: blockHash1}, nil)
+ mev.process()
+
+ if stateUpdated {
+ t.Fatalf("State should not have been updated")
+ }
+
+ co.UpdateState(id, &pb.BlockchainInfo{Height: blockNumber2 + 1, CurrentBlockHash: blockHash2}, nil)
+ mev.process()
+
+ if !stateUpdated {
+ t.Fatalf("State should have been updated")
+ }
+}
+
+// TestExecuteAfterStateTransfer attempts an execute and commit after a simple state transfer request
+func TestExecuteAfterStateTransfer(t *testing.T) {
+ co, mc, _, _, mev := newMocks(t)
+ testTxs := []*pb.Transaction{&pb.Transaction{}, &pb.Transaction{}, &pb.Transaction{}}
+
+ id := struct{}{}
+ blockNumber := uint64(2389)
+ blockHash := []byte("BlockHash")
+
+ stateTransferred := false
+ mc.StateUpdatedImpl = func(tag interface{}, info *pb.BlockchainInfo) {
+ if nil == info {
+ t.Fatalf("State transfer should have succeeded")
+ }
+ stateTransferred = true
+ }
+
+ executed := false
+ mc.ExecutedImpl = func(tag interface{}) {
+ executed = true
+ }
+
+ co.UpdateState(id, &pb.BlockchainInfo{Height: blockNumber + 1, CurrentBlockHash: blockHash}, nil)
+ co.Execute(id, testTxs)
+ mev.process()
+
+ if !executed {
+ t.Fatalf("Execution should have occurred")
+ }
+}
+
+// TestExecuteDuringStateTransfer attempts a state transfer which fails, then an execute which should not be performed
+func TestExecuteDuringStateTransfer(t *testing.T) {
+ co, mc, mre, mst, mev := newMocks(t)
+ testTxs := []*pb.Transaction{&pb.Transaction{}, &pb.Transaction{}, &pb.Transaction{}}
+
+ id := struct{}{}
+ blockNumber := uint64(2389)
+ blockHash := []byte("BlockHash")
+
+ mc.StateUpdatedImpl = func(tag interface{}, info *pb.BlockchainInfo) {
+ if info != nil {
+ t.Fatalf("State transfer should not succeed")
+ }
+ }
+
+ mst.SyncToTargetImpl = func(bn uint64, bh []byte, ps []*pb.PeerID) (error, bool) {
+ return fmt.Errorf("Irrecoverable error"), false
+ }
+
+ co.UpdateState(id, &pb.BlockchainInfo{Height: blockNumber + 1, CurrentBlockHash: blockHash}, nil)
+ co.Execute(id, testTxs)
+ mev.process()
+
+ if mre.curBatch != nil {
+ t.Fatalf("Execution should not have executed beginning a new batch")
+ }
+}
diff --git a/consensus/helper/engine.go b/consensus/helper/engine.go
new file mode 100644
index 00000000000..e7defc8ae26
--- /dev/null
+++ b/consensus/helper/engine.go
@@ -0,0 +1,132 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package helper
+
+import (
+ "github.com/hyperledger/fabric/consensus"
+ "github.com/hyperledger/fabric/core/peer"
+
+ "fmt"
+ "sync"
+
+ "github.com/hyperledger/fabric/consensus/controller"
+ "github.com/hyperledger/fabric/consensus/util"
+ "github.com/hyperledger/fabric/core/chaincode"
+ pb "github.com/hyperledger/fabric/protos"
+ "golang.org/x/net/context"
+)
+
+// EngineImpl implements a struct to hold consensus.Consenter, PeerEndpoint and MessageFan
+type EngineImpl struct {
+ consenter consensus.Consenter
+ helper *Helper
+ peerEndpoint *pb.PeerEndpoint
+ consensusFan *util.MessageFan
+}
+
+// GetHandlerFactory returns new NewConsensusHandler
+func (eng *EngineImpl) GetHandlerFactory() peer.HandlerFactory {
+ return NewConsensusHandler
+}
+
+// ProcessTransactionMsg processes a Message in context of a Transaction
+func (eng *EngineImpl) ProcessTransactionMsg(msg *pb.Message, tx *pb.Transaction) (response *pb.Response) {
+ //TODO: Do we always verify security, or can we supply a flag on the invoke of this function so as to bypass the check for locally generated transactions?
+ if tx.Type == pb.Transaction_CHAINCODE_QUERY {
+ if !engine.helper.valid {
+ logger.Warning("Rejecting query because state is currently not valid")
+ return &pb.Response{Status: pb.Response_FAILURE,
+ Msg: []byte("Error: state may be inconsistent, cannot query")}
+ }
+
+ // The secHelper is set during creation of the ChaincodeSupport, so we don't need this step
+ // cxt := context.WithValue(context.Background(), "security", secHelper)
+ cxt := context.Background()
+ //query will ignore events as these are not stored on ledger (and query can report
+ //"event" data synchronously anyway)
+ result, _, err := chaincode.Execute(cxt, chaincode.GetChain(chaincode.DefaultChain), tx)
+ if err != nil {
+ response = &pb.Response{Status: pb.Response_FAILURE,
+ Msg: []byte(fmt.Sprintf("Error:%s", err))}
+ } else {
+ response = &pb.Response{Status: pb.Response_SUCCESS, Msg: result}
+ }
+ } else {
+ // Chaincode Transaction
+ response = &pb.Response{Status: pb.Response_SUCCESS, Msg: []byte(tx.Uuid)}
+
+ //TODO: Do we need to verify security, or can we supply a flag on the invoke of this function
+ // If we fail to marshal or verify the tx, don't send it to consensus plugin
+ if response.Status == pb.Response_FAILURE {
+ return response
+ }
+
+ // Pass the message to the consenter (eg. PBFT) NOTE: Make sure engine has been initialized
+ if eng.consenter == nil {
+ return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte("Engine not initialized")}
+ }
+ // TODO, do we want to put these requests into a queue? This will block until
+ // the consenter gets around to handling the message, but it also provides some
+ // natural feedback to the REST API to determine how long it takes to queue messages
+ err := eng.consenter.RecvMsg(msg, eng.peerEndpoint.ID)
+ if err != nil {
+ response = &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(err.Error())}
+ }
+ }
+ return response
+}
+
+func (eng *EngineImpl) setConsenter(consenter consensus.Consenter) *EngineImpl {
+ eng.consenter = consenter
+ return eng
+}
+
+func (eng *EngineImpl) setPeerEndpoint(peerEndpoint *pb.PeerEndpoint) *EngineImpl {
+ eng.peerEndpoint = peerEndpoint
+ return eng
+}
+
+var engineOnce sync.Once
+
+var engine *EngineImpl
+
+func getEngineImpl() *EngineImpl {
+ return engine
+}
+
+// GetEngine returns initialized peer.Engine
+func GetEngine(coord peer.MessageHandlerCoordinator) (peer.Engine, error) {
+ var err error
+ engineOnce.Do(func() {
+ engine = new(EngineImpl)
+ engine.helper = NewHelper(coord)
+ engine.consenter = controller.NewConsenter(engine.helper)
+ engine.helper.setConsenter(engine.consenter)
+ engine.peerEndpoint, err = coord.GetPeerEndpoint()
+ engine.consensusFan = util.NewMessageFan()
+
+ go func() {
+ logger.Debug("Starting up message thread for consenter")
+
+ // The channel never closes, so this should never break
+ for msg := range engine.consensusFan.GetOutChannel() {
+ engine.consenter.RecvMsg(msg.Msg, msg.Sender)
+ }
+ }()
+ })
+ return engine, err
+}
diff --git a/consensus/helper/engine_test.go b/consensus/helper/engine_test.go
new file mode 100644
index 00000000000..3b067f55d98
--- /dev/null
+++ b/consensus/helper/engine_test.go
@@ -0,0 +1,23 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package helper
+
+import "testing"
+
+func TestEngine(t *testing.T) {
+ t.Skip("Engine functions already tested in other consensus components")
+}
diff --git a/consensus/helper/handler.go b/consensus/helper/handler.go
new file mode 100644
index 00000000000..2ce99e16b69
--- /dev/null
+++ b/consensus/helper/handler.go
@@ -0,0 +1,102 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package helper
+
+import (
+ "fmt"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+
+ "github.com/hyperledger/fabric/consensus/util"
+ "github.com/hyperledger/fabric/core/peer"
+
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+var logger *logging.Logger // package-level logger
+
+func init() {
+ logger = logging.MustGetLogger("consensus/handler")
+}
+
+const (
+ // DefaultConsensusQueueSize value of 1000
+ DefaultConsensusQueueSize int = 1000
+)
+
+// ConsensusHandler handles consensus messages.
+// It also implements the Stack.
+type ConsensusHandler struct {
+ peer.MessageHandler
+ consenterChan chan *util.Message
+ coordinator peer.MessageHandlerCoordinator
+}
+
+// NewConsensusHandler constructs a new MessageHandler for the plugin.
+// Is instance of peer.HandlerFactory
+func NewConsensusHandler(coord peer.MessageHandlerCoordinator,
+ stream peer.ChatStream, initiatedStream bool,
+ next peer.MessageHandler) (peer.MessageHandler, error) {
+
+ peerHandler, err := peer.NewPeerHandler(coord, stream, initiatedStream, nil)
+ if err != nil {
+ return nil, fmt.Errorf("Error creating PeerHandler: %s", err)
+ }
+
+ handler := &ConsensusHandler{
+ MessageHandler: peerHandler,
+ coordinator: coord,
+ }
+
+ consensusQueueSize := viper.GetInt("peer.validator.consensus.buffersize")
+
+ if consensusQueueSize <= 0 {
+ logger.Errorf("peer.validator.consensus.buffersize is set to %d, but this must be a positive integer, defaulting to %d", consensusQueueSize, DefaultConsensusQueueSize)
+ consensusQueueSize = DefaultConsensusQueueSize
+ }
+
+ pe, _ := handler.To()
+
+ handler.consenterChan = make(chan *util.Message, consensusQueueSize)
+ getEngineImpl().consensusFan.RegisterChannel(pe.ID, handler.consenterChan)
+
+ return handler, nil
+}
+
+// HandleMessage handles the incoming Fabric messages for the Peer
+func (handler *ConsensusHandler) HandleMessage(msg *pb.Message) error {
+ if msg.Type == pb.Message_CONSENSUS {
+ senderPE, _ := handler.To()
+ select {
+ case handler.consenterChan <- &util.Message{
+ Msg: msg,
+ Sender: senderPE.ID,
+ }:
+ return nil
+ default:
+ err := fmt.Errorf("Message channel for %v full, rejecting", senderPE.ID)
+ logger.Errorf("Failed to queue consensus message because: %v", err)
+ return err
+ }
+ }
+
+ if logger.IsEnabledFor(logging.DEBUG) {
+ logger.Debugf("Did not handle message of type %s, passing on to next MessageHandler", msg.Type)
+ }
+ return handler.MessageHandler.HandleMessage(msg)
+}
diff --git a/consensus/helper/handler_test.go b/consensus/helper/handler_test.go
new file mode 100644
index 00000000000..54d4cad841c
--- /dev/null
+++ b/consensus/helper/handler_test.go
@@ -0,0 +1,23 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package helper
+
+import "testing"
+
+func TestHandler(t *testing.T) {
+ t.Skip("Handler functions already tested in other consensus components")
+}
diff --git a/consensus/helper/helper.go b/consensus/helper/helper.go
new file mode 100644
index 00000000000..ad473fcf228
--- /dev/null
+++ b/consensus/helper/helper.go
@@ -0,0 +1,388 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package helper
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/spf13/viper"
+ "golang.org/x/net/context"
+
+ "github.com/hyperledger/fabric/consensus"
+ "github.com/hyperledger/fabric/consensus/executor"
+ "github.com/hyperledger/fabric/consensus/helper/persist"
+ "github.com/hyperledger/fabric/core/chaincode"
+ crypto "github.com/hyperledger/fabric/core/crypto"
+ "github.com/hyperledger/fabric/core/ledger"
+ "github.com/hyperledger/fabric/core/peer"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// Helper contains the reference to the peer's MessageHandlerCoordinator
+type Helper struct {
+ consenter consensus.Consenter
+ coordinator peer.MessageHandlerCoordinator
+ secOn bool
+ valid bool // Whether we believe the state is up to date
+ secHelper crypto.Peer
+ curBatch []*pb.Transaction // TODO, remove after issue 579
+ curBatchErrs []*pb.TransactionResult // TODO, remove after issue 579
+ persist.Helper
+
+ executor consensus.Executor
+}
+
+// NewHelper constructs the consensus helper object
+func NewHelper(mhc peer.MessageHandlerCoordinator) *Helper {
+ h := &Helper{
+ coordinator: mhc,
+ secOn: viper.GetBool("security.enabled"),
+ secHelper: mhc.GetSecHelper(),
+ valid: true, // Assume our state is consistent until we are told otherwise, TODO: revisit
+ }
+
+ h.executor = executor.NewImpl(h, h, mhc)
+ h.executor.Start()
+ return h
+}
+
+func (h *Helper) setConsenter(c consensus.Consenter) {
+ h.consenter = c
+}
+
+// GetNetworkInfo returns the PeerEndpoints of the current validator and the entire validating network
+func (h *Helper) GetNetworkInfo() (self *pb.PeerEndpoint, network []*pb.PeerEndpoint, err error) {
+ ep, err := h.coordinator.GetPeerEndpoint()
+ if err != nil {
+ return self, network, fmt.Errorf("Couldn't retrieve own endpoint: %v", err)
+ }
+ self = ep
+
+ peersMsg, err := h.coordinator.GetPeers()
+ if err != nil {
+ return self, network, fmt.Errorf("Couldn't retrieve list of peers: %v", err)
+ }
+ peers := peersMsg.GetPeers()
+ for _, endpoint := range peers {
+ if endpoint.Type == pb.PeerEndpoint_VALIDATOR {
+ network = append(network, endpoint)
+ }
+ }
+ network = append(network, self)
+
+ return
+}
+
+// GetNetworkHandles returns the PeerIDs of the current validator and the entire validating network
+func (h *Helper) GetNetworkHandles() (self *pb.PeerID, network []*pb.PeerID, err error) {
+ selfEP, networkEP, err := h.GetNetworkInfo()
+ if err != nil {
+ return self, network, fmt.Errorf("Couldn't retrieve validating network's endpoints: %v", err)
+ }
+
+ self = selfEP.ID
+
+ for _, endpoint := range networkEP {
+ network = append(network, endpoint.ID)
+ }
+ network = append(network, self)
+
+ return
+}
+
+// Broadcast sends a message to all validating peers
+func (h *Helper) Broadcast(msg *pb.Message, peerType pb.PeerEndpoint_Type) error {
+ errors := h.coordinator.Broadcast(msg, peerType)
+ if len(errors) > 0 {
+ return fmt.Errorf("Couldn't broadcast successfully")
+ }
+ return nil
+}
+
+// Unicast sends a message to a specified receiver
+func (h *Helper) Unicast(msg *pb.Message, receiverHandle *pb.PeerID) error {
+ return h.coordinator.Unicast(msg, receiverHandle)
+}
+
+// Sign a message with this validator's signing key
+func (h *Helper) Sign(msg []byte) ([]byte, error) {
+ if h.secOn {
+ return h.secHelper.Sign(msg)
+ }
+ logger.Debug("Security is disabled")
+ return msg, nil
+}
+
+// Verify that the given signature is valid under the given replicaID's verification key
+// If replicaID is nil, use this validator's verification key
+// If the signature is valid, the function should return nil
+func (h *Helper) Verify(replicaID *pb.PeerID, signature []byte, message []byte) error {
+ if !h.secOn {
+ logger.Debug("Security is disabled")
+ return nil
+ }
+
+ logger.Debugf("Verify message from: %v", replicaID.Name)
+ _, network, err := h.GetNetworkInfo()
+ if err != nil {
+ return fmt.Errorf("Couldn't retrieve validating network's endpoints: %v", err)
+ }
+
+ // check that the sender is a valid replica
+ // if so, call crypto verify() with that endpoint's pkiID
+ for _, endpoint := range network {
+ logger.Debugf("Endpoint name: %v", endpoint.ID.Name)
+ if *replicaID == *endpoint.ID {
+ cryptoID := endpoint.PkiID
+ return h.secHelper.Verify(cryptoID, signature, message)
+ }
+ }
+ return fmt.Errorf("Could not verify message from %s (unknown peer)", replicaID.Name)
+}
+
+// BeginTxBatch gets invoked when the next round
+// of transaction-batch execution begins
+func (h *Helper) BeginTxBatch(id interface{}) error {
+ ledger, err := ledger.GetLedger()
+ if err != nil {
+ return fmt.Errorf("Failed to get the ledger: %v", err)
+ }
+ if err := ledger.BeginTxBatch(id); err != nil {
+ return fmt.Errorf("Failed to begin transaction with the ledger: %v", err)
+ }
+ h.curBatch = nil // TODO, remove after issue 579
+ h.curBatchErrs = nil // TODO, remove after issue 579
+ return nil
+}
+
+// ExecTxs executes all the transactions listed in the txs array
+// one-by-one. If all the executions are successful, it returns
+// the candidate global state hash, and nil error array.
+func (h *Helper) ExecTxs(id interface{}, txs []*pb.Transaction) ([]byte, error) {
+ // TODO id is currently ignored, fix once the underlying implementation accepts id
+
+ // The secHelper is set during creation of the ChaincodeSupport, so we don't need this step
+ // cxt := context.WithValue(context.Background(), "security", h.coordinator.GetSecHelper())
+ // TODO return directly once underlying implementation no longer returns []error
+
+ succeededTxs, res, ccevents, txerrs, err := chaincode.ExecuteTransactions(context.Background(), chaincode.DefaultChain, txs)
+
+ h.curBatch = append(h.curBatch, succeededTxs...) // TODO, remove after issue 579
+
+ //copy errs to result
+ txresults := make([]*pb.TransactionResult, len(txerrs))
+
+ //process errors for each transaction
+ for i, e := range txerrs {
+ //NOTE- it would be nice if we could have proper error values. For now success == 0, error == 1
+ if txerrs[i] != nil {
+ txresults[i] = &pb.TransactionResult{Uuid: txs[i].Uuid, Error: e.Error(), ErrorCode: 1, ChaincodeEvent: ccevents[i]}
+ } else {
+ txresults[i] = &pb.TransactionResult{Uuid: txs[i].Uuid, ChaincodeEvent: ccevents[i]}
+ }
+ }
+ h.curBatchErrs = append(h.curBatchErrs, txresults...) // TODO, remove after issue 579
+
+ return res, err
+}
+
+// CommitTxBatch gets invoked when the current transaction-batch needs
+// to be committed. This function returns successfully iff the
+// transactions details and state changes (that may have happened
+// during execution of this transaction-batch) have been committed to
+// permanent storage.
+func (h *Helper) CommitTxBatch(id interface{}, metadata []byte) (*pb.Block, error) {
+ ledger, err := ledger.GetLedger()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to get the ledger: %v", err)
+ }
+ // TODO fix this one the ledger has been fixed to implement
+ if err := ledger.CommitTxBatch(id, h.curBatch, h.curBatchErrs, metadata); err != nil {
+ return nil, fmt.Errorf("Failed to commit transaction to the ledger: %v", err)
+ }
+
+ size := ledger.GetBlockchainSize()
+ defer func() {
+ h.curBatch = nil // TODO, remove after issue 579
+ h.curBatchErrs = nil // TODO, remove after issue 579
+ }()
+
+ block, err := ledger.GetBlockByNumber(size - 1)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to get the block at the head of the chain: %v", err)
+ }
+
+ logger.Debugf("Committed block with %d transactions, intended to include %d", len(block.Transactions), len(h.curBatch))
+
+ return block, nil
+}
+
+// RollbackTxBatch discards all the state changes that may have taken
+// place during the execution of current transaction-batch
+func (h *Helper) RollbackTxBatch(id interface{}) error {
+ ledger, err := ledger.GetLedger()
+ if err != nil {
+ return fmt.Errorf("Failed to get the ledger: %v", err)
+ }
+ if err := ledger.RollbackTxBatch(id); err != nil {
+ return fmt.Errorf("Failed to rollback transaction with the ledger: %v", err)
+ }
+ h.curBatch = nil // TODO, remove after issue 579
+ h.curBatchErrs = nil // TODO, remove after issue 579
+ return nil
+}
+
+// PreviewCommitTxBatch retrieves a preview of the block info blob (as
+// returned by GetBlockchainInfoBlob) that would describe the
+// blockchain if CommitTxBatch were invoked. The blockinfo will
+// change if additional ExecTXs calls are invoked.
+func (h *Helper) PreviewCommitTxBatch(id interface{}, metadata []byte) ([]byte, error) {
+ ledger, err := ledger.GetLedger()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to get the ledger: %v", err)
+ }
+ // TODO fix this once the underlying API is fixed
+ blockInfo, err := ledger.GetTXBatchPreviewBlockInfo(id, h.curBatch, metadata)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to preview commit: %v", err)
+ }
+ rawInfo, _ := proto.Marshal(blockInfo)
+ return rawInfo, nil
+}
+
+// GetBlock returns a block from the chain
+func (h *Helper) GetBlock(blockNumber uint64) (block *pb.Block, err error) {
+ ledger, err := ledger.GetLedger()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to get the ledger :%v", err)
+ }
+ return ledger.GetBlockByNumber(blockNumber)
+}
+
+// GetCurrentStateHash returns the current/temporary state hash
+func (h *Helper) GetCurrentStateHash() (stateHash []byte, err error) {
+ ledger, err := ledger.GetLedger()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to get the ledger :%v", err)
+ }
+ return ledger.GetTempStateHash()
+}
+
+// GetBlockchainSize returns the current size of the blockchain
+func (h *Helper) GetBlockchainSize() uint64 {
+ return h.coordinator.GetBlockchainSize()
+}
+
+// GetBlockchainInfo gets the ledger's BlockchainInfo
+func (h *Helper) GetBlockchainInfo() *pb.BlockchainInfo {
+ ledger, _ := ledger.GetLedger()
+ info, _ := ledger.GetBlockchainInfo()
+ return info
+}
+
+// GetBlockchainInfoBlob marshals a ledger's BlockchainInfo into a protobuf
+func (h *Helper) GetBlockchainInfoBlob() []byte {
+ ledger, _ := ledger.GetLedger()
+ info, _ := ledger.GetBlockchainInfo()
+ rawInfo, _ := proto.Marshal(info)
+ return rawInfo
+}
+
+// GetBlockHeadMetadata returns metadata from block at the head of the blockchain
+func (h *Helper) GetBlockHeadMetadata() ([]byte, error) {
+ ledger, err := ledger.GetLedger()
+ if err != nil {
+ return nil, err
+ }
+ head := ledger.GetBlockchainSize()
+ block, err := ledger.GetBlockByNumber(head - 1)
+ if err != nil {
+ return nil, err
+ }
+ return block.ConsensusMetadata, nil
+}
+
+// InvalidateState is invoked to tell us that consensus realizes the ledger is out of sync
+func (h *Helper) InvalidateState() {
+ logger.Debug("Invalidating the current state")
+ h.valid = false
+}
+
+// ValidateState is invoked to tell us that consensus has the ledger back in sync
+func (h *Helper) ValidateState() {
+ logger.Debug("Validating the current state")
+ h.valid = true
+}
+
+// Execute will execute a set of transactions, this may be called in succession
+func (h *Helper) Execute(tag interface{}, txs []*pb.Transaction) {
+ h.executor.Execute(tag, txs)
+}
+
+// Commit will commit whatever transactions have been executed
+func (h *Helper) Commit(tag interface{}, metadata []byte) {
+ h.executor.Commit(tag, metadata)
+}
+
+// Rollback will roll back whatever transactions have been executed
+func (h *Helper) Rollback(tag interface{}) {
+ h.executor.Rollback(tag)
+}
+
+// UpdateState attempts to synchronize state to a particular target, implicitly calls rollback if needed
+func (h *Helper) UpdateState(tag interface{}, target *pb.BlockchainInfo, peers []*pb.PeerID) {
+ if h.valid {
+ logger.Warning("State transfer is being called for, but the state has not been invalidated")
+ }
+
+ h.executor.UpdateState(tag, target, peers)
+}
+
+// Executed is called whenever Execute completes
+func (h *Helper) Executed(tag interface{}) {
+ if h.consenter != nil {
+ h.consenter.Executed(tag)
+ }
+}
+
+// Committed is called whenever Commit completes
+func (h *Helper) Committed(tag interface{}, target *pb.BlockchainInfo) {
+ if h.consenter != nil {
+ h.consenter.Committed(tag, target)
+ }
+}
+
+// RolledBack is called whenever a Rollback completes
+func (h *Helper) RolledBack(tag interface{}) {
+ if h.consenter != nil {
+ h.consenter.RolledBack(tag)
+ }
+}
+
+// StateUpdated is called when state transfer completes, if target is nil, this indicates a failure and a new target should be supplied
+func (h *Helper) StateUpdated(tag interface{}, target *pb.BlockchainInfo) {
+ if h.consenter != nil {
+ h.consenter.StateUpdated(tag, target)
+ }
+}
+
+ // Start is a byproduct of the consensus API needing some cleaning, for now it's a no-op
+func (h *Helper) Start() {}
+
+// Halt is a byproduct of the consensus API needing some cleaning, for now it's a no-op
+func (h *Helper) Halt() {}
diff --git a/consensus/helper/helper_test.go b/consensus/helper/helper_test.go
new file mode 100644
index 00000000000..4c12c87bbc4
--- /dev/null
+++ b/consensus/helper/helper_test.go
@@ -0,0 +1,23 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package helper
+
+import "testing"
+
+func TestHelper(t *testing.T) {
+ t.Skip("Helper functions already tested in other consensus components")
+}
diff --git a/consensus/helper/persist/persist.go b/consensus/helper/persist/persist.go
new file mode 100644
index 00000000000..4456e835322
--- /dev/null
+++ b/consensus/helper/persist/persist.go
@@ -0,0 +1,60 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package persist
+
+import (
+ "github.com/hyperledger/fabric/core/db"
+)
+
+// Helper provides an abstraction to access the Persist column family
+// in the database.
+type Helper struct{}
+
+// StoreState stores a key,value pair
+func (h *Helper) StoreState(key string, value []byte) error {
+ db := db.GetDBHandle()
+ return db.Put(db.PersistCF, []byte("consensus."+key), value)
+}
+
+// DelState removes a key,value pair
+func (h *Helper) DelState(key string) {
+ db := db.GetDBHandle()
+ db.Delete(db.PersistCF, []byte("consensus."+key))
+}
+
+// ReadState retrieves a value to a key
+func (h *Helper) ReadState(key string) ([]byte, error) {
+ db := db.GetDBHandle()
+ return db.Get(db.PersistCF, []byte("consensus."+key))
+}
+
+// ReadStateSet retrieves all key,value pairs where the key starts with prefix
+func (h *Helper) ReadStateSet(prefix string) (map[string][]byte, error) {
+ db := db.GetDBHandle()
+ prefixRaw := []byte("consensus." + prefix)
+
+ ret := make(map[string][]byte)
+ it := db.GetIterator(db.PersistCF)
+ defer it.Close()
+ for it.Seek(prefixRaw); it.ValidForPrefix(prefixRaw); it.Next() {
+ key := string(it.Key().Data())
+ key = key[len("consensus."):]
+ // copy data from the slice!
+ ret[key] = append([]byte(nil), it.Value().Data()...)
+ }
+ return ret, nil
+}
diff --git a/consensus/helper/persist/persist_test.go b/consensus/helper/persist/persist_test.go
new file mode 100644
index 00000000000..73bd0c7c87d
--- /dev/null
+++ b/consensus/helper/persist/persist_test.go
@@ -0,0 +1,23 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package persist
+
+import "testing"
+
+func TestPersist(t *testing.T) {
+ t.Skip("Persist functions already tested in other consensus components")
+}
diff --git a/consensus/noops/config.yaml b/consensus/noops/config.yaml
new file mode 100644
index 00000000000..cabc92c51ec
--- /dev/null
+++ b/consensus/noops/config.yaml
@@ -0,0 +1,22 @@
+---
+###############################################################################
+#
+# NOOPS PROPERTIES
+#
+# These properties may be passed as environment variables when starting up
+# a validating peer with prefix CORE_NOOPS. For example:
+# CORE_NOOPS_BLOCK_SIZE=1000
+# CORE_NOOPS_BLOCK_WAIT=2
+#
+###############################################################################
+
+# Define properties for a block: A block is created whenever "size" or "wait"
+# occurs. When we process a block, we grab all transactions in the queue, so
+# the number of transactions in a block may be greater than the "size".
+block:
+ # Number of transactions per block. Must be > 0. Set to 1 for testing
+ size: 500
+
+ # Time to wait for a block.
+ # The default unit of measure is seconds. Otherwise, specify ms (milliseconds), us (microseconds), ns (nanoseconds), m (minutes) or h (hours)
+ wait: 1s
diff --git a/consensus/noops/configutil.go b/consensus/noops/configutil.go
new file mode 100644
index 00000000000..60cacd333e0
--- /dev/null
+++ b/consensus/noops/configutil.go
@@ -0,0 +1,53 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package noops
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/spf13/viper"
+)
+
+const configPrefix = "CORE_NOOPS"
+
+func loadConfig() (config *viper.Viper) {
+ config = viper.New()
+
+ // for environment variables
+ config.SetEnvPrefix(configPrefix)
+ config.AutomaticEnv()
+ replacer := strings.NewReplacer(".", "_")
+ config.SetEnvKeyReplacer(replacer)
+
+ config.SetConfigName("config")
+ config.AddConfigPath("./")
+ config.AddConfigPath("../consensus/noops/")
+ // Path to look for the config file in based on GOPATH
+ gopath := os.Getenv("GOPATH")
+ for _, p := range filepath.SplitList(gopath) {
+ path := filepath.Join(p, "src/github.com/hyperledger/fabric/consensus/noops")
+ config.AddConfigPath(path)
+ }
+ err := config.ReadInConfig()
+ if err != nil {
+ panic(fmt.Errorf("Error reading %s plugin config: %s", configPrefix, err))
+ }
+ return config
+}
diff --git a/consensus/noops/noops.go b/consensus/noops/noops.go
new file mode 100644
index 00000000000..db64177e9c9
--- /dev/null
+++ b/consensus/noops/noops.go
@@ -0,0 +1,315 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package noops
+
+import (
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/op/go-logging"
+
+ "github.com/hyperledger/fabric/consensus"
+ "github.com/hyperledger/fabric/core/ledger"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/util"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+var logger *logging.Logger // package-level logger
+
+func init() {
+ logger = logging.MustGetLogger("consensus/noops")
+}
+
+// Noops is a plugin object implementing the consensus.Consenter interface.
+type Noops struct {
+ stack consensus.Stack
+ txQ *txq
+ timer *time.Timer
+ duration time.Duration
+ channel chan *pb.Transaction
+}
+
+// Setting up a singleton NOOPS consenter
+var iNoops consensus.Consenter
+
+// GetNoops returns a singleton of NOOPS
+func GetNoops(c consensus.Stack) consensus.Consenter {
+ if iNoops == nil {
+ iNoops = newNoops(c)
+ }
+ return iNoops
+}
+
+// newNoops is a constructor returning a consensus.Consenter object.
+func newNoops(c consensus.Stack) consensus.Consenter {
+ var err error
+ if logger.IsEnabledFor(logging.DEBUG) {
+ logger.Debug("Creating a NOOPS object")
+ }
+ i := &Noops{}
+ i.stack = c
+ config := loadConfig()
+ blockSize := config.GetInt("block.size")
+ blockWait := config.GetString("block.wait")
+ if _, err = strconv.Atoi(blockWait); err == nil {
+ blockWait = blockWait + "s" //if string does not have unit of measure, default to seconds
+ }
+ i.duration, err = time.ParseDuration(blockWait)
+ if err != nil || i.duration == 0 {
+ panic(fmt.Errorf("Cannot parse block wait: %s", err))
+ }
+
+ logger.Infof("NOOPS consensus type = %T", i)
+ logger.Infof("NOOPS block size = %v", blockSize)
+ logger.Infof("NOOPS block wait = %v", i.duration)
+
+ i.txQ = newTXQ(blockSize)
+
+ i.channel = make(chan *pb.Transaction, 100)
+ i.timer = time.NewTimer(i.duration) // start timer now so we can just reset it
+ i.timer.Stop()
+ go i.handleChannels()
+ return i
+}
+
+// RecvMsg is called for Message_CHAIN_TRANSACTION and Message_CONSENSUS messages.
+func (i *Noops) RecvMsg(msg *pb.Message, senderHandle *pb.PeerID) error {
+ if logger.IsEnabledFor(logging.DEBUG) {
+ logger.Debugf("Handling Message of type: %s ", msg.Type)
+ }
+ if msg.Type == pb.Message_CHAIN_TRANSACTION {
+ if err := i.broadcastConsensusMsg(msg); nil != err {
+ return err
+ }
+ }
+ if msg.Type == pb.Message_CONSENSUS {
+ tx, err := i.getTxFromMsg(msg)
+ if nil != err {
+ return err
+ }
+ if logger.IsEnabledFor(logging.DEBUG) {
+ logger.Debugf("Sending to channel tx uuid: %s", tx.Uuid)
+ }
+ i.channel <- tx
+ }
+ return nil
+}
+
+func (i *Noops) broadcastConsensusMsg(msg *pb.Message) error {
+ t := &pb.Transaction{}
+ if err := proto.Unmarshal(msg.Payload, t); err != nil {
+ return fmt.Errorf("Error unmarshalling payload of received Message:%s.", msg.Type)
+ }
+
+ // Change the msg type to consensus and broadcast to the network so that
+ // other validators may execute the transaction
+ msg.Type = pb.Message_CONSENSUS
+ if logger.IsEnabledFor(logging.DEBUG) {
+ logger.Debugf("Broadcasting %s", msg.Type)
+ }
+ txs := &pb.TransactionBlock{Transactions: []*pb.Transaction{t}}
+ payload, err := proto.Marshal(txs)
+ if err != nil {
+ return err
+ }
+ msg.Payload = payload
+ if errs := i.stack.Broadcast(msg, pb.PeerEndpoint_VALIDATOR); nil != errs {
+ return fmt.Errorf("Failed to broadcast with errors: %v", errs)
+ }
+ return nil
+}
+
+func (i *Noops) canProcessBlock(tx *pb.Transaction) bool {
+ // For NOOPS, if we have completed the sync since we last connected,
+ // we can assume that we are at the current state; otherwise, we need to
+ // wait for the sync process to complete before we can exec the transactions
+
+ // TODO: Ask coordinator if we need to start sync
+
+ i.txQ.append(tx)
+
+ // start timer if we get a tx
+ if i.txQ.size() == 1 {
+ i.timer.Reset(i.duration)
+ }
+ return i.txQ.isFull()
+}
+
+func (i *Noops) handleChannels() {
+ // Noops is a singleton object and only exits when peer exits, so we
+ // don't need a condition to exit this loop
+ for {
+ select {
+ case tx := <-i.channel:
+ if i.canProcessBlock(tx) {
+ if logger.IsEnabledFor(logging.DEBUG) {
+ logger.Debug("Process block due to size")
+ }
+ if err := i.processBlock(); nil != err {
+ logger.Error(err.Error())
+ }
+ }
+ case <-i.timer.C:
+ if logger.IsEnabledFor(logging.DEBUG) {
+ logger.Debug("Process block due to time")
+ }
+ if err := i.processBlock(); nil != err {
+ logger.Error(err.Error())
+ }
+ }
+ }
+}
+
+func (i *Noops) processBlock() error {
+ i.timer.Stop()
+
+ if i.txQ.size() < 1 {
+ if logger.IsEnabledFor(logging.DEBUG) {
+ logger.Debug("processBlock() called but transaction Q is empty")
+ }
+ return nil
+ }
+ var data *pb.Block
+ var delta *statemgmt.StateDelta
+ var err error
+
+ if err = i.processTransactions(); nil != err {
+ return err
+ }
+ if data, delta, err = i.getBlockData(); nil != err {
+ return err
+ }
+ go i.notifyBlockAdded(data, delta)
+ return nil
+}
+
+func (i *Noops) processTransactions() error {
+ timestamp := util.CreateUtcTimestamp()
+ if logger.IsEnabledFor(logging.DEBUG) {
+ logger.Debugf("Starting TX batch with timestamp: %v", timestamp)
+ }
+ if err := i.stack.BeginTxBatch(timestamp); err != nil {
+ return err
+ }
+
+ // Grab all transactions from the FIFO queue and run them in order
+ txarr := i.txQ.getTXs()
+ if logger.IsEnabledFor(logging.DEBUG) {
+ logger.Debugf("Executing batch of %d transactions with timestamp %v", len(txarr), timestamp)
+ }
+ _, err := i.stack.ExecTxs(timestamp, txarr)
+
+ //consensus does not need to understand transaction errors, errors here are
+ //actual ledger errors, and often irrecoverable
+ if err != nil {
+ logger.Debugf("Rolling back TX batch with timestamp: %v", timestamp)
+ i.stack.RollbackTxBatch(timestamp)
+ return fmt.Errorf("Fail to execute transactions: %v", err)
+ }
+ if logger.IsEnabledFor(logging.DEBUG) {
+ logger.Debugf("Committing TX batch with timestamp: %v", timestamp)
+ }
+ if _, err := i.stack.CommitTxBatch(timestamp, nil); err != nil {
+ logger.Debugf("Rolling back TX batch with timestamp: %v", timestamp)
+ i.stack.RollbackTxBatch(timestamp)
+ return err
+ }
+ return nil
+}
+
+func (i *Noops) getTxFromMsg(msg *pb.Message) (*pb.Transaction, error) {
+ txs := &pb.TransactionBlock{}
+ if err := proto.Unmarshal(msg.Payload, txs); err != nil {
+ return nil, err
+ }
+ return txs.GetTransactions()[0], nil
+}
+
+func (i *Noops) getBlockData() (*pb.Block, *statemgmt.StateDelta, error) {
+ ledger, err := ledger.GetLedger()
+ if err != nil {
+ return nil, nil, fmt.Errorf("Fail to get the ledger: %v", err)
+ }
+
+ blockHeight := ledger.GetBlockchainSize()
+ if logger.IsEnabledFor(logging.DEBUG) {
+ logger.Debugf("Preparing to broadcast with block number %v", blockHeight)
+ }
+ block, err := ledger.GetBlockByNumber(blockHeight - 1)
+ if nil != err {
+ return nil, nil, err
+ }
+ //delta, err := ledger.GetStateDeltaBytes(blockHeight)
+ delta, err := ledger.GetStateDelta(blockHeight - 1)
+ if nil != err {
+ return nil, nil, err
+ }
+ if logger.IsEnabledFor(logging.DEBUG) {
+ logger.Debugf("Got the delta state of block number %v", blockHeight)
+ }
+
+ return block, delta, nil
+}
+
+func (i *Noops) notifyBlockAdded(block *pb.Block, delta *statemgmt.StateDelta) error {
+ //make Payload nil to reduce block size..
+ //anything else to remove .. do we need StateDelta ?
+ for _, tx := range block.Transactions {
+ tx.Payload = nil
+ }
+ data, err := proto.Marshal(&pb.BlockState{Block: block, StateDelta: delta.Marshal()})
+ if err != nil {
+ return fmt.Errorf("Fail to marshall BlockState structure: %v", err)
+ }
+ if logger.IsEnabledFor(logging.DEBUG) {
+ logger.Debug("Broadcasting Message_SYNC_BLOCK_ADDED to non-validators")
+ }
+
+ // Broadcast SYNC_BLOCK_ADDED to connected NVPs
+ // VPs already know about this newly added block since they participate
+ // in the execution. That is, they can compare their current block with
+ // the network block
+ msg := &pb.Message{Type: pb.Message_SYNC_BLOCK_ADDED,
+ Payload: data, Timestamp: util.CreateUtcTimestamp()}
+ if errs := i.stack.Broadcast(msg, pb.PeerEndpoint_NON_VALIDATOR); nil != errs {
+ return fmt.Errorf("Failed to broadcast with errors: %v", errs)
+ }
+ return nil
+}
+
+// Executed is called whenever Execute completes, no-op for noops as it uses the legacy synchronous api
+func (i *Noops) Executed(tag interface{}) {
+ // Never called
+}
+
+// Committed is called whenever Commit completes, no-op for noops as it uses the legacy synchronous api
+func (i *Noops) Committed(tag interface{}, target *pb.BlockchainInfo) {
+ // Never called
+}
+
+// RolledBack is called whenever a Rollback completes, no-op for noops as it uses the legacy synchronous api
+func (i *Noops) RolledBack(tag interface{}) {
+ // Never called
+}
+
+// StateUpdated is called when state transfer completes, if target is nil, this indicates a failure and a new target should be supplied, no-op for noops as it uses the legacy synchronous api
+func (i *Noops) StateUpdated(tag interface{}, target *pb.BlockchainInfo) {
+ // Never called
+}
diff --git a/consensus/noops/txq.go b/consensus/noops/txq.go
new file mode 100644
index 00000000000..ac7a22e1404
--- /dev/null
+++ b/consensus/noops/txq.go
@@ -0,0 +1,64 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package noops
+
+import (
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+type txq struct {
+ i int
+ q []*pb.Transaction
+}
+
+func newTXQ(size int) *txq {
+ o := &txq{}
+ o.i = 0
+ if size < 1 {
+ size = 1
+ }
+ o.q = make([]*pb.Transaction, size)
+ return o
+}
+
+func (o *txq) append(tx *pb.Transaction) {
+ if cap(o.q) > o.i {
+ o.q[o.i] = tx
+ o.i++
+ }
+}
+
+func (o *txq) getTXs() []*pb.Transaction {
+ length := o.i
+ o.i = 0
+ return o.q[:length]
+}
+
+func (o *txq) isFull() bool {
+ if cap(o.q) == o.i {
+ return true
+ }
+ return false
+}
+
+func (o *txq) size() int {
+ return o.i
+}
+
+func (o *txq) reset() {
+ o.i = 0
+}
diff --git a/consensus/pbft/batch.go b/consensus/pbft/batch.go
new file mode 100644
index 00000000000..ce834aac852
--- /dev/null
+++ b/consensus/pbft/batch.go
@@ -0,0 +1,473 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "fmt"
+ "google/protobuf"
+ "time"
+
+ "github.com/hyperledger/fabric/consensus"
+ "github.com/hyperledger/fabric/consensus/util/events"
+ pb "github.com/hyperledger/fabric/protos"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+type obcBatch struct {
+ obcGeneric
+ externalEventReceiver
+ pbft *pbftCore
+ broadcaster *broadcaster
+
+ batchSize int
+ batchStore []*Request
+ batchTimer events.Timer
+ batchTimerActive bool
+ batchTimeout time.Duration
+
+ manager events.Manager // TODO, remove eventually, the event manager
+
+ incomingChan chan *batchMessage // Queues messages for processing by main thread
+ idleChan chan struct{} // Idle channel, to be removed
+
+ reqStore *requestStore // Holds the outstanding and pending requests
+
+ deduplicator *deduplicator
+
+ persistForward
+}
+
+type batchMessage struct {
+ msg *pb.Message
+ sender *pb.PeerID
+}
+
+// Event types
+
+// batchMessageEvent is sent when a consensus message is received that is then to be sent to pbft
+type batchMessageEvent batchMessage
+
+// batchTimerEvent is sent when the batch timer expires
+type batchTimerEvent struct{}
+
+func newObcBatch(id uint64, config *viper.Viper, stack consensus.Stack) *obcBatch {
+ var err error
+
+ op := &obcBatch{
+ obcGeneric: obcGeneric{stack: stack},
+ }
+
+ op.persistForward.persistor = stack
+
+ logger.Debugf("Replica %d obtaining startup information", id)
+
+ op.manager = events.NewManagerImpl() // TODO, this is hacky, eventually rip it out
+ op.manager.SetReceiver(op)
+ etf := events.NewTimerFactoryImpl(op.manager)
+ op.pbft = newPbftCore(id, config, op, etf)
+ op.manager.Start()
+ op.externalEventReceiver.manager = op.manager
+ op.broadcaster = newBroadcaster(id, op.pbft.N, op.pbft.f, stack)
+
+ op.batchSize = config.GetInt("general.batchsize")
+ op.batchStore = nil
+ op.batchTimeout, err = time.ParseDuration(config.GetString("general.timeout.batch"))
+ if err != nil {
+ panic(fmt.Errorf("Cannot parse batch timeout: %s", err))
+ }
+ logger.Infof("PBFT Batch size = %d", op.batchSize)
+ logger.Infof("PBFT Batch timeout = %v", op.batchTimeout)
+
+ if op.batchTimeout >= op.pbft.requestTimeout {
+ op.pbft.requestTimeout = 3 * op.batchTimeout / 2
+ logger.Warningf("Configured request timeout must be greater than batch timeout, setting to %v", op.pbft.requestTimeout)
+ }
+
+ if op.pbft.requestTimeout >= op.pbft.nullRequestTimeout && op.pbft.nullRequestTimeout != 0 {
+ op.pbft.nullRequestTimeout = 3 * op.pbft.requestTimeout / 2
+ logger.Warningf("Configured null request timeout must be greater than request timeout, setting to %v", op.pbft.nullRequestTimeout)
+ }
+
+ op.incomingChan = make(chan *batchMessage)
+
+ op.batchTimer = etf.CreateTimer()
+
+ op.reqStore = newRequestStore()
+
+ op.deduplicator = newDeduplicator()
+
+ op.idleChan = make(chan struct{})
+ close(op.idleChan) // TODO remove eventually
+
+ return op
+}
+
+// Close tells us to release resources we are holding
+func (op *obcBatch) Close() {
+ op.batchTimer.Halt()
+ op.pbft.close()
+}
+
+func (op *obcBatch) submitToLeader(req *Request) events.Event {
+ // Broadcast the request to the network, in case we're in the wrong view
+ op.broadcastMsg(&BatchMessage{Payload: &BatchMessage_Request{Request: req}})
+ op.logAddTxFromRequest(req)
+ op.reqStore.storeOutstanding(req)
+ op.startTimerIfOutstandingRequests()
+ if op.pbft.primary(op.pbft.view) == op.pbft.id && op.pbft.activeView {
+ return op.leaderProcReq(req)
+ }
+ return nil
+}
+
+func (op *obcBatch) broadcastMsg(msg *BatchMessage) {
+ msgPayload, _ := proto.Marshal(msg)
+ ocMsg := &pb.Message{
+ Type: pb.Message_CONSENSUS,
+ Payload: msgPayload,
+ }
+ op.broadcaster.Broadcast(ocMsg)
+}
+
+// send a message to a specific replica
+func (op *obcBatch) unicastMsg(msg *BatchMessage, receiverID uint64) {
+ msgPayload, _ := proto.Marshal(msg)
+ ocMsg := &pb.Message{
+ Type: pb.Message_CONSENSUS,
+ Payload: msgPayload,
+ }
+ op.broadcaster.Unicast(ocMsg, receiverID)
+}
+
+// =============================================================================
+// innerStack interface (functions called by pbft-core)
+// =============================================================================
+
+// multicast a message to all replicas
+func (op *obcBatch) broadcast(msgPayload []byte) {
+ op.broadcaster.Broadcast(op.wrapMessage(msgPayload))
+}
+
+// send a message to a specific replica
+func (op *obcBatch) unicast(msgPayload []byte, receiverID uint64) (err error) {
+ return op.broadcaster.Unicast(op.wrapMessage(msgPayload), receiverID)
+}
+
+func (op *obcBatch) sign(msg []byte) ([]byte, error) {
+ return op.stack.Sign(msg)
+}
+
+// verify message signature
+func (op *obcBatch) verify(senderID uint64, signature []byte, message []byte) error {
+ senderHandle, err := getValidatorHandle(senderID)
+ if err != nil {
+ return err
+ }
+ return op.stack.Verify(senderHandle, signature, message)
+}
+
+// execute an opaque request which corresponds to an OBC Transaction
+func (op *obcBatch) execute(seqNo uint64, reqBatch *RequestBatch) {
+ var txs []*pb.Transaction
+ for _, req := range reqBatch.GetBatch() {
+ tx := &pb.Transaction{}
+ if err := proto.Unmarshal(req.Payload, tx); err != nil {
+ logger.Warningf("Batch replica %d could not unmarshal transaction %s", op.pbft.id, err)
+ continue
+ }
+ logger.Debugf("Batch replica %d executing request with transaction %s from outstandingReqs, seqNo=%d", op.pbft.id, tx.Uuid, seqNo)
+ if outstanding, pending := op.reqStore.remove(req); !outstanding || !pending {
+ logger.Debugf("Batch replica %d missing transaction %s outstanding=%v, pending=%v", op.pbft.id, tx.Uuid, outstanding, pending)
+ }
+ txs = append(txs, tx)
+ op.deduplicator.Execute(req)
+ }
+ meta, _ := proto.Marshal(&Metadata{seqNo})
+ logger.Debugf("Batch replica %d received exec for seqNo %d containing %d transactions", op.pbft.id, seqNo, len(txs))
+ op.stack.Execute(meta, txs) // This executes in the background, we will receive an executedEvent once it completes
+}
+
+// =============================================================================
+// functions specific to batch mode
+// =============================================================================
+
+func (op *obcBatch) leaderProcReq(req *Request) events.Event {
+ // XXX check req sig
+ digest := hash(req)
+ logger.Debugf("Batch primary %d queueing new request %s", op.pbft.id, digest)
+ op.batchStore = append(op.batchStore, req)
+ op.reqStore.storePending(req)
+
+ if !op.batchTimerActive {
+ op.startBatchTimer()
+ }
+
+ if len(op.batchStore) >= op.batchSize {
+ return op.sendBatch()
+ }
+
+ return nil
+}
+
+func (op *obcBatch) sendBatch() events.Event {
+ op.stopBatchTimer()
+ if len(op.batchStore) == 0 {
+ logger.Error("Told to send an empty batch store for ordering, ignoring")
+ return nil
+ }
+
+ reqBatch := &RequestBatch{Batch: op.batchStore}
+ op.batchStore = nil
+ logger.Infof("Creating batch with %d requests", len(reqBatch.Batch))
+ return reqBatch
+}
+
+func (op *obcBatch) txToReq(tx []byte) *Request {
+ now := time.Now()
+ req := &Request{
+ Timestamp: &google_protobuf.Timestamp{
+ Seconds: now.Unix(),
+ Nanos: int32(now.UnixNano() % 1000000000),
+ },
+ Payload: tx,
+ ReplicaId: op.pbft.id,
+ }
+ // XXX sign req
+ return req
+}
+
+func (op *obcBatch) processMessage(ocMsg *pb.Message, senderHandle *pb.PeerID) events.Event {
+ if ocMsg.Type == pb.Message_CHAIN_TRANSACTION {
+ req := op.txToReq(ocMsg.Payload)
+ return op.submitToLeader(req)
+ }
+
+ if ocMsg.Type != pb.Message_CONSENSUS {
+ logger.Errorf("Unexpected message type: %s", ocMsg.Type)
+ return nil
+ }
+
+ batchMsg := &BatchMessage{}
+ err := proto.Unmarshal(ocMsg.Payload, batchMsg)
+ if err != nil {
+ logger.Errorf("Error unmarshaling message: %s", err)
+ return nil
+ }
+
+ if req := batchMsg.GetRequest(); req != nil {
+ if !op.deduplicator.IsNew(req) {
+ logger.Warningf("Replica %d ignoring request as it is too old", op.pbft.id)
+ return nil
+ }
+
+ op.logAddTxFromRequest(req)
+ op.reqStore.storeOutstanding(req)
+ if (op.pbft.primary(op.pbft.view) == op.pbft.id) && op.pbft.activeView {
+ return op.leaderProcReq(req)
+ }
+ op.startTimerIfOutstandingRequests()
+ return nil
+ } else if pbftMsg := batchMsg.GetPbftMessage(); pbftMsg != nil {
+ senderID, err := getValidatorID(senderHandle) // who sent this?
+ if err != nil {
+ panic("Cannot map sender's PeerID to a valid replica ID")
+ }
+ msg := &Message{}
+ err = proto.Unmarshal(pbftMsg, msg)
+ if err != nil {
+ logger.Errorf("Error unpacking payload from message: %s", err)
+ return nil
+ }
+ return pbftMessageEvent{
+ msg: msg,
+ sender: senderID,
+ }
+ }
+
+ logger.Errorf("Unknown request: %+v", batchMsg)
+
+ return nil
+}
+
+func (op *obcBatch) logAddTxFromRequest(req *Request) {
+ if logger.IsEnabledFor(logging.DEBUG) {
+ // This is potentially a very large expensive debug statement, guard
+ tx := &pb.Transaction{}
+ err := proto.Unmarshal(req.Payload, tx)
+ if err != nil {
+ logger.Errorf("Replica %d was sent a transaction which did not unmarshal: %s", op.pbft.id, err)
+ } else {
+ logger.Debugf("Replica %d adding request from %d with transaction %s into outstandingReqs", op.pbft.id, req.ReplicaId, tx.Uuid)
+ }
+ }
+}
+
+func (op *obcBatch) resubmitOutstandingReqs() events.Event {
+ op.startTimerIfOutstandingRequests()
+
+ // If we are the primary, and know of outstanding requests, submit them for inclusion in the next batch until
+ // we run out of requests, or a new batch message is triggered (this path will re-enter after execution)
+ // Do not enter while an execution is in progress to prevent duplicating a request
+ if op.pbft.primary(op.pbft.view) == op.pbft.id && op.pbft.activeView && op.pbft.currentExec == nil {
+ needed := op.batchSize - len(op.batchStore)
+
+ for op.reqStore.hasNonPending() {
+ outstanding := op.reqStore.getNextNonPending(needed)
+
+ // If we have enough outstanding requests, this will trigger a batch
+ for _, nreq := range outstanding {
+ if msg := op.leaderProcReq(nreq); msg != nil {
+ op.manager.Inject(msg)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// allow the primary to send a batch when the timer expires
+func (op *obcBatch) ProcessEvent(event events.Event) events.Event {
+ logger.Debugf("Replica %d batch main thread looping", op.pbft.id)
+ switch et := event.(type) {
+ case batchMessageEvent:
+ ocMsg := et
+ return op.processMessage(ocMsg.msg, ocMsg.sender)
+ case executedEvent:
+ op.stack.Commit(nil, et.tag.([]byte))
+ case committedEvent:
+ logger.Debugf("Replica %d received committedEvent", op.pbft.id)
+ return execDoneEvent{}
+ case execDoneEvent:
+ if res := op.pbft.ProcessEvent(event); res != nil {
+ // This may trigger a view change, if so, process it, we will resubmit on new view
+ return res
+ }
+ return op.resubmitOutstandingReqs()
+ case batchTimerEvent:
+ logger.Infof("Replica %d batch timer expired", op.pbft.id)
+ if op.pbft.activeView && (len(op.batchStore) > 0) {
+ return op.sendBatch()
+ }
+ case *Commit:
+ // TODO, this is extremely hacky, but should go away when batch and core are merged
+ res := op.pbft.ProcessEvent(event)
+ op.startTimerIfOutstandingRequests()
+ return res
+ case viewChangedEvent:
+ op.batchStore = nil
+ // Outstanding reqs doesn't make sense for batch, as all the requests in a batch may be processed
+ // in a different batch, but PBFT core can't see through the opaque structure to see this
+ // so, on view change, clear it out
+ op.pbft.outstandingReqBatches = make(map[string]*RequestBatch)
+
+ logger.Debugf("Replica %d batch thread recognizing new view", op.pbft.id)
+ if op.batchTimerActive {
+ op.stopBatchTimer()
+ }
+
+ if op.pbft.skipInProgress {
+ // If we're the new primary, but we're in state transfer, we can't trust ourself not to duplicate things
+ op.reqStore.outstandingRequests.empty()
+ }
+
+ op.reqStore.pendingRequests.empty()
+ for i := op.pbft.h + 1; i <= op.pbft.h+op.pbft.L; i++ {
+ if i <= op.pbft.lastExec {
+ continue
+ }
+
+ cert, ok := op.pbft.certStore[msgID{v: op.pbft.view, n: i}]
+ if !ok || cert.prePrepare == nil {
+ continue
+ }
+
+ if cert.prePrepare.BatchDigest == "" {
+ // a null request
+ continue
+ }
+
+ if cert.prePrepare.RequestBatch == nil {
+ logger.Warningf("Replica %d found a non-null prePrepare with no request batch, ignoring", op.pbft.id)
+ continue
+ }
+
+ op.reqStore.storePendings(cert.prePrepare.RequestBatch.GetBatch())
+ }
+
+ return op.resubmitOutstandingReqs()
+ case stateUpdatedEvent:
+ // When the state is updated, clear any outstanding requests, they may have been processed while we were gone
+ op.reqStore = newRequestStore()
+ return op.pbft.ProcessEvent(event)
+ default:
+ return op.pbft.ProcessEvent(event)
+ }
+
+ return nil
+}
+
+func (op *obcBatch) startBatchTimer() {
+ op.batchTimer.Reset(op.batchTimeout, batchTimerEvent{})
+ logger.Debugf("Replica %d started the batch timer", op.pbft.id)
+ op.batchTimerActive = true
+}
+
+func (op *obcBatch) stopBatchTimer() {
+ op.batchTimer.Stop()
+ logger.Debugf("Replica %d stopped the batch timer", op.pbft.id)
+ op.batchTimerActive = false
+}
+
+// Wraps a payload into a batch message, packs it and wraps it into
+// a Fabric message. Called by broadcast before transmission.
+func (op *obcBatch) wrapMessage(msgPayload []byte) *pb.Message {
+ batchMsg := &BatchMessage{Payload: &BatchMessage_PbftMessage{PbftMessage: msgPayload}}
+ packedBatchMsg, _ := proto.Marshal(batchMsg)
+ ocMsg := &pb.Message{
+ Type: pb.Message_CONSENSUS,
+ Payload: packedBatchMsg,
+ }
+ return ocMsg
+}
+
+// Retrieve the idle channel, only used for testing
+func (op *obcBatch) idleChannel() <-chan struct{} {
+ return op.idleChan
+}
+
+// TODO, temporary
+func (op *obcBatch) getManager() events.Manager {
+ return op.manager
+}
+
+func (op *obcBatch) startTimerIfOutstandingRequests() {
+ if op.pbft.skipInProgress || op.pbft.currentExec != nil || !op.pbft.activeView {
+ // Do not start view change timer if some background event is in progress
+ logger.Debugf("Replica %d not starting timer because skip in progress or current exec or in view change", op.pbft.id)
+ return
+ }
+
+ if !op.reqStore.hasNonPending() {
+ // Only start a timer if we are aware of outstanding requests
+ logger.Debugf("Replica %d not starting timer because all outstanding requests are pending", op.pbft.id)
+ return
+ }
+ op.pbft.softStartTimer(op.pbft.requestTimeout, "Batch outstanding requests")
+}
diff --git a/consensus/pbft/batch_test.go b/consensus/pbft/batch_test.go
new file mode 100644
index 00000000000..b5a3fdacfbd
--- /dev/null
+++ b/consensus/pbft/batch_test.go
@@ -0,0 +1,362 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "testing"
+ "time"
+
+ "github.com/hyperledger/fabric/consensus"
+ "github.com/hyperledger/fabric/consensus/util/events"
+ pb "github.com/hyperledger/fabric/protos"
+
+ "github.com/spf13/viper"
+)
+
+func (op *obcBatch) getPBFTCore() *pbftCore {
+ return op.pbft
+}
+
+func obcBatchHelper(id uint64, config *viper.Viper, stack consensus.Stack) pbftConsumer {
+ // It's not entirely obvious why the compiler likes the parent function, but not newObcBatch directly
+ return newObcBatch(id, config, stack)
+}
+
+func TestNetworkBatch(t *testing.T) {
+ batchSize := 2
+ validatorCount := 4
+ net := makeConsumerNetwork(validatorCount, obcBatchHelper, func(ce *consumerEndpoint) {
+ ce.consumer.(*obcBatch).batchSize = batchSize
+ })
+ defer net.stop()
+
+ broadcaster := net.endpoints[generateBroadcaster(validatorCount)].getHandle()
+ err := net.endpoints[1].(*consumerEndpoint).consumer.RecvMsg(createTxMsg(1), broadcaster)
+ if err != nil {
+ t.Errorf("External request was not processed by backup: %v", err)
+ }
+ err = net.endpoints[2].(*consumerEndpoint).consumer.RecvMsg(createTxMsg(2), broadcaster)
+ if err != nil {
+ t.Fatalf("External request was not processed by backup: %v", err)
+ }
+
+ net.process()
+ net.process()
+
+ if l := len(net.endpoints[0].(*consumerEndpoint).consumer.(*obcBatch).batchStore); l != 0 {
+ t.Errorf("%d messages expected in primary's batchStore, found %v", 0,
+ net.endpoints[0].(*consumerEndpoint).consumer.(*obcBatch).batchStore)
+ }
+
+ for _, ep := range net.endpoints {
+ ce := ep.(*consumerEndpoint)
+ block, err := ce.consumer.(*obcBatch).stack.GetBlock(1)
+ if nil != err {
+ t.Fatalf("Replica %d executed requests, expected a new block on the chain, but could not retrieve it : %s", ce.id, err)
+ }
+ numTrans := len(block.Transactions)
+ if numTrans != batchSize {
+ t.Fatalf("Replica %d executed %d requests, expected %d",
+ ce.id, numTrans, batchSize)
+ }
+ }
+}
+
+func TestClearOustandingReqsOnStateRecovery(t *testing.T) {
+ b := newObcBatch(0, loadConfig(), &omniProto{})
+ defer b.Close()
+
+ b.reqStore.storeOutstanding(&Request{})
+
+ b.manager.Queue() <- stateUpdatedEvent{
+ chkpt: &checkpointMessage{
+ seqNo: 10,
+ },
+ }
+
+ b.manager.Queue() <- nil
+
+ if b.reqStore.outstandingRequests.Len() != 0 {
+ t.Fatalf("Should not have any requests outstanding after completing state transfer")
+ }
+}
+
+func TestOutstandingReqsIngestion(t *testing.T) {
+ bs := [3]*obcBatch{}
+ for i := range bs {
+ omni := &omniProto{
+ UnicastImpl: func(ocMsg *pb.Message, peer *pb.PeerID) error { return nil },
+ }
+ bs[i] = newObcBatch(uint64(i), loadConfig(), omni)
+ defer bs[i].Close()
+
+ // Have vp1 only deliver messages
+ if i == 1 {
+ omni.UnicastImpl = func(ocMsg *pb.Message, peer *pb.PeerID) error {
+ dest, _ := getValidatorID(peer)
+ if dest == 0 || dest == 2 {
+ bs[dest].RecvMsg(ocMsg, &pb.PeerID{Name: "vp1"})
+ }
+ return nil
+ }
+ }
+ }
+
+ err := bs[1].RecvMsg(createTxMsg(1), &pb.PeerID{Name: "vp1"})
+ if err != nil {
+ t.Fatalf("External request was not processed by backup: %v", err)
+ }
+
+ for _, b := range bs {
+ b.manager.Queue() <- nil
+ b.broadcaster.Wait()
+ b.manager.Queue() <- nil
+ }
+
+ for i, b := range bs {
+ b.manager.Queue() <- nil
+ count := b.reqStore.outstandingRequests.Len()
+ if count != 1 {
+ t.Errorf("Batch backup %d should have the request in its store", i)
+ }
+ }
+}
+
+// TestOutstandingReqsResubmission verifies that requests left outstanding
+// across execution and a view change are resubmitted, while a request that
+// is already pre-prepared (present in the qSet) is NOT broadcast again.
+func TestOutstandingReqsResubmission(t *testing.T) {
+	omni := &omniProto{}
+	config := loadConfig()
+	config.Set("general.batchsize", 2)
+	b := newObcBatch(0, config, omni)
+	defer b.Close() // The broadcasting threads only cause problems here... but this test stalls without them
+
+	transactionsBroadcast := 0
+	omni.ExecuteImpl = func(tag interface{}, txs []*pb.Transaction) {
+		transactionsBroadcast += len(txs)
+		logger.Debugf("\nExecuting %d transactions (%v)\n", len(txs), txs)
+		nextExec := b.pbft.lastExec + 1
+		b.pbft.currentExec = &nextExec
+		b.manager.Inject(executedEvent{tag: tag})
+	}
+
+	omni.CommitImpl = func(tag interface{}, meta []byte) {
+		b.manager.Inject(committedEvent{})
+	}
+
+	omni.UnicastImpl = func(ocMsg *pb.Message, dest *pb.PeerID) error {
+		return nil
+	}
+
+	reqs := make([]*Request, 8)
+	for i := 0; i < len(reqs); i++ {
+		reqs[i] = createPbftReq(int64(i), 0)
+	}
+
+	// Add four requests, with a batch size of 2
+	b.reqStore.storeOutstanding(reqs[0])
+	b.reqStore.storeOutstanding(reqs[1])
+	b.reqStore.storeOutstanding(reqs[2])
+	b.reqStore.storeOutstanding(reqs[3])
+
+	// execute drains every not-yet-executed outstanding batch exactly once
+	executed := make(map[string]struct{})
+	execute := func() {
+		for d, reqBatch := range b.pbft.outstandingReqBatches {
+			if _, ok := executed[d]; ok {
+				continue
+			}
+			executed[d] = struct{}{}
+			b.execute(b.pbft.lastExec+1, reqBatch)
+		}
+	}
+
+	tmp := uint64(1)
+	b.pbft.currentExec = &tmp
+	events.SendEvent(b, committedEvent{})
+	execute()
+
+	if b.reqStore.outstandingRequests.Len() != 0 {
+		t.Fatalf("All request batches should have been executed and deleted after exec")
+	}
+
+	// Simulate changing views, with a request in the qSet, and one outstanding which is not
+	wreqsBatch := &RequestBatch{Batch: []*Request{reqs[4]}}
+	prePrep := &PrePrepare{
+		View:           0,
+		SequenceNumber: b.pbft.lastExec + 1,
+		BatchDigest:    "foo",
+		RequestBatch:   wreqsBatch,
+	}
+
+	b.pbft.certStore[msgID{v: prePrep.View, n: prePrep.SequenceNumber}] = &msgCert{prePrepare: prePrep}
+
+	// Add the request, which is already pre-prepared, to be outstanding, and one outstanding not pending, not prepared
+	b.reqStore.storeOutstanding(reqs[4]) // reqs[4] is already in the qSet via prePrep above
+	b.reqStore.storeOutstanding(reqs[5])
+	b.reqStore.storeOutstanding(reqs[6])
+	b.reqStore.storeOutstanding(reqs[7])
+
+	events.SendEvent(b, viewChangedEvent{})
+	execute()
+
+	if b.reqStore.hasNonPending() {
+		t.Errorf("All requests should have been resubmitted after view change")
+	}
+
+	// We should have one request in batch which has not been sent yet
+	expected := 6
+	if transactionsBroadcast != expected {
+		t.Errorf("Expected %d transactions broadcast, got %d", expected, transactionsBroadcast)
+	}
+
+	events.SendEvent(b, batchTimerEvent{})
+	execute()
+
+	// If the already prepared request were to be resubmitted, we would get count 8 here
+	expected = 7
+	if transactionsBroadcast != expected {
+		t.Errorf("Expected %d transactions broadcast, got %d", expected, transactionsBroadcast)
+	}
+}
+
+// TestViewChangeOnPrimarySilence verifies that a backup which accepts a
+// request but observes no progress from the primary starts a view change
+// once the (shortened) request timeout fires.
+func TestViewChangeOnPrimarySilence(t *testing.T) {
+	b := newObcBatch(1, loadConfig(), &omniProto{
+		UnicastImpl: func(ocMsg *pb.Message, peer *pb.PeerID) error { return nil },
+		SignImpl: func(msg []byte) ([]byte, error) { return msg, nil },
+		VerifyImpl: func(peerID *pb.PeerID, signature []byte, message []byte) error { return nil },
+	})
+	b.pbft.requestTimeout = 50 * time.Millisecond
+	defer b.Close()
+
+	// Send a request, which will be ignored, triggering view change
+	b.manager.Queue() <- batchMessageEvent{createTxMsg(1), &pb.PeerID{Name: "vp0"}}
+	time.Sleep(time.Second)
+	b.manager.Queue() <- nil // sync with the event loop before inspecting state
+
+	if b.pbft.activeView {
+		t.Fatalf("Should have caused a view change")
+	}
+}
+
+// obcBatchSizeOneHelper constructs an obcBatch consumer with batch size one,
+// so each request forms its own batch (used by the state transfer tests).
+func obcBatchSizeOneHelper(id uint64, config *viper.Viper, stack consensus.Stack) pbftConsumer {
+	// It's not entirely obvious why the compiler likes the parent function, but not newObcClassic directly
+	config.Set("general.batchsize", 1)
+	return newObcBatch(id, config, stack)
+}
+
+// TestClassicStateTransfer isolates replica 3 from the network for the first
+// request, then verifies it catches up via state transfer and ends with the
+// same chain (block 9) and in view 0, like everyone else.
+func TestClassicStateTransfer(t *testing.T) {
+	validatorCount := 4
+	net := makeConsumerNetwork(validatorCount, obcBatchSizeOneHelper, func(ce *consumerEndpoint) {
+		ce.consumer.(*obcBatch).pbft.K = 2
+		ce.consumer.(*obcBatch).pbft.L = 4
+	})
+	defer net.stop()
+	// net.debug = true
+
+	// While filterMsg is true, drop every message destined for replica 3
+	filterMsg := true
+	net.filterFn = func(src int, dst int, msg []byte) []byte {
+		if filterMsg && dst == 3 { // 3 is byz
+			return nil
+		}
+		return msg
+	}
+
+	// Advance the network one seqNo past so that Replica 3 will have to do statetransfer
+	broadcaster := net.endpoints[generateBroadcaster(validatorCount)].getHandle()
+	net.endpoints[1].(*consumerEndpoint).consumer.RecvMsg(createTxMsg(1), broadcaster)
+	net.process()
+
+	// Move the seqNo to 9, at seqNo 6, Replica 3 will realize it's behind, transfer to seqNo 8, then execute seqNo 9
+	filterMsg = false
+	for n := 2; n <= 9; n++ {
+		net.endpoints[1].(*consumerEndpoint).consumer.RecvMsg(createTxMsg(int64(n)), broadcaster)
+	}
+
+	net.process()
+
+	// All replicas, including the formerly-isolated 3, must have block 9
+	for _, ep := range net.endpoints {
+		ce := ep.(*consumerEndpoint)
+		obc := ce.consumer.(*obcBatch)
+		_, err := obc.stack.GetBlock(9)
+		if nil != err {
+			t.Errorf("Replica %d executed requests, expected a new block on the chain, but could not retrieve it : %s", ce.id, err)
+		}
+		if !obc.pbft.activeView || obc.pbft.view != 0 {
+			t.Errorf("Replica %d not active in view 0, is %v %d", ce.id, obc.pbft.activeView, obc.pbft.view)
+		}
+	}
+}
+
+// TestClassicBackToBackStateTransfer isolates replica 3 for the first
+// request, then drives the network far enough ahead that replica 3 must
+// perform repeated state transfers to catch up, ending at block 21.
+func TestClassicBackToBackStateTransfer(t *testing.T) {
+	validatorCount := 4
+	net := makeConsumerNetwork(validatorCount, obcBatchSizeOneHelper, func(ce *consumerEndpoint) {
+		ce.consumer.(*obcBatch).pbft.K = 2
+		ce.consumer.(*obcBatch).pbft.L = 4
+		ce.consumer.(*obcBatch).pbft.requestTimeout = time.Hour // We do not want any view changes
+	})
+	defer net.stop()
+	// net.debug = true
+
+	filterMsg := true
+	net.filterFn = func(src int, dst int, msg []byte) []byte {
+		if filterMsg && dst == 3 { // 3 is byz
+			return nil
+		}
+		return msg
+	}
+
+	// Get the group to advance past seqNo 1, leaving Replica 3 behind
+	broadcaster := net.endpoints[generateBroadcaster(validatorCount)].getHandle()
+	net.endpoints[1].(*consumerEndpoint).consumer.RecvMsg(createTxMsg(1), broadcaster)
+	net.process()
+
+	// Now start including Replica 3 and advance through seqNo 21. Replica 3
+	// will repeatedly fall outside its watermarks, receive successive state
+	// transfer targets, and catch up in stages before finally executing
+	// through the last sequence number.
+	filterMsg = false
+	for n := 2; n <= 21; n++ {
+		net.endpoints[1].(*consumerEndpoint).consumer.RecvMsg(createTxMsg(int64(n)), broadcaster)
+	}
+
+	net.process()
+
+	// Every replica, including 3, must end up with block 21 in view 0
+	for _, ep := range net.endpoints {
+		ce := ep.(*consumerEndpoint)
+		obc := ce.consumer.(*obcBatch)
+		_, err := obc.stack.GetBlock(21)
+		if nil != err {
+			t.Errorf("Replica %d executed requests, expected a new block on the chain, but could not retrieve it : %s", ce.id, err)
+		}
+		if !obc.pbft.activeView || obc.pbft.view != 0 {
+			t.Errorf("Replica %d not active in view 0, is %v %d", ce.id, obc.pbft.activeView, obc.pbft.view)
+		}
+	}
+}
+
+// TestClearBatchStoreOnViewChange verifies that a pending (unsent) batch is
+// discarded when the replica changes views.
+func TestClearBatchStoreOnViewChange(t *testing.T) {
+	b := newObcBatch(1, loadConfig(), &omniProto{})
+	defer b.Close()
+
+	b.batchStore = []*Request{&Request{}}
+
+	// Inject a view change event; processing it must clear the batch store
+	b.manager.Queue() <- viewChangedEvent{}
+	b.manager.Queue() <- nil // sync with the event loop before inspecting state
+
+	if len(b.batchStore) != 0 {
+		t.Fatalf("Should have cleared the batch store on view change")
+	}
+}
diff --git a/consensus/pbft/broadcast.go b/consensus/pbft/broadcast.go
new file mode 100644
index 00000000000..bb30feea7bd
--- /dev/null
+++ b/consensus/pbft/broadcast.go
@@ -0,0 +1,208 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/hyperledger/fabric/consensus"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// communicator is the subset of the consensus stack the broadcaster needs:
+// point-to-point sends plus network membership queries.
+type communicator interface {
+	consensus.Communicator
+	consensus.Inquirer
+}
+
+// broadcaster fans messages out to the other replicas, using one buffered
+// queue and one drainer goroutine per destination so that a single slow or
+// unreachable peer cannot stall sends to the others.
+type broadcaster struct {
+	comm communicator
+
+	f int // maximum number of faulty replicas tolerated
+	msgChans map[uint64]chan *sendRequest // per-destination send queues, keyed by replica id
+	closed sync.WaitGroup // counts send requests still in flight
+	closedCh chan struct{} // closed to shut the broadcaster down
+}
+
+// sendRequest is one queued message; exactly one bool (send success) is
+// delivered on done for each request.
+type sendRequest struct {
+	msg *pb.Message
+	done chan bool
+}
+
+// newBroadcaster creates a broadcaster for replica `self` in a network of N
+// replicas, at most f of which are assumed faulty. One buffered channel and
+// one drainer goroutine is created per peer (self excluded); c is the
+// underlying communication stack performing the actual sends.
+func newBroadcaster(self uint64, N int, f int, c communicator) *broadcaster {
+	queueSize := 10 // XXX increase after testing
+
+	chans := make(map[uint64]chan *sendRequest)
+	b := &broadcaster{
+		comm:     c,
+		f:        f,
+		msgChans: chans,
+		closedCh: make(chan struct{}),
+	}
+	for i := 0; i < N; i++ {
+		if uint64(i) == self {
+			continue
+		}
+		chans[uint64(i)] = make(chan *sendRequest, queueSize)
+	}
+
+	// We do not start the go routines in the above loop to avoid concurrent map read/writes
+	for i := 0; i < N; i++ {
+		if uint64(i) == self {
+			// No channel was created for self above; starting a drainer here
+			// would leave a goroutine parked on a nil-channel receive until
+			// Close, so skip it.
+			continue
+		}
+		go b.drainer(uint64(i))
+	}
+
+	return b
+}
+
+// Close shuts the broadcaster down and blocks until every queued send
+// request has been flagged done (successfully or not).
+func (b *broadcaster) Close() {
+	close(b.closedCh)
+	b.closed.Wait()
+}
+
+// Wait blocks until all currently in-flight send requests have completed.
+func (b *broadcaster) Wait() {
+	b.closed.Wait()
+}
+
+// drainerSend performs one unicast attempt to dest, reports the outcome on
+// send.done, and decrements the in-flight WaitGroup (the matching Add was
+// performed by send()). The returned bool becomes the caller's new
+// successLastTime value.
+func (b *broadcaster) drainerSend(dest uint64, send *sendRequest, successLastTime bool) bool {
+	// Note, successLastTime is purely used to avoid flooding the log with unnecessary warning messages when a network problem is encountered
+	defer func() {
+		b.closed.Done()
+	}()
+	h, err := getValidatorHandle(dest)
+	if err != nil {
+		if successLastTime {
+			logger.Warningf("could not get handle for replica %d", dest)
+		}
+		send.done <- false
+		return false
+	}
+
+	err = b.comm.Unicast(send.msg, h)
+	if err != nil {
+		if successLastTime {
+			logger.Warningf("could not send to replica %d: %v", dest, err)
+		}
+		send.done <- false
+		return false
+	}
+
+	send.done <- true
+	return true
+
+}
+
+// drainer is the per-destination send loop: it serially delivers queued
+// requests for dest until the broadcaster is closed, then drains any
+// remaining queued requests (flagging them failed) so waiters in send()
+// are released before the goroutine exits.
+func (b *broadcaster) drainer(dest uint64) {
+	successLastTime := false
+	destChan := b.msgChans[dest] // Avoid doing the map lookup every send
+
+	for {
+		select {
+		case send := <-destChan:
+			successLastTime = b.drainerSend(dest, send, successLastTime)
+		case <-b.closedCh:
+			for {
+				// Drain the message channel to free calling waiters before we shut down
+				select {
+				case send := <-destChan:
+					send.done <- false
+					b.closed.Done()
+				default:
+					return
+				}
+			}
+		}
+	}
+}
+
+// unicastOne queues msg for dest without blocking. If the destination's
+// queue is full the message is dropped and reported as failed on wait; the
+// caller already incremented b.closed for this request, hence the Done on
+// the drop path (the drainer performs it on the normal path).
+func (b *broadcaster) unicastOne(msg *pb.Message, dest uint64, wait chan bool) {
+	select {
+	case b.msgChans[dest] <- &sendRequest{
+		msg: msg,
+		done: wait,
+	}:
+	default:
+		// If this channel is full, we must discard the message and flag it as done
+		wait <- false
+		b.closed.Done()
+	}
+}
+
+// send queues msg either to the single destination *dest (when dest != nil)
+// or to every peer (when dest == nil). For a broadcast, `required` is
+// destCount - f: enough successes that, with at most f faulty replicas, the
+// message has reached a quorum. An error is returned only when the
+// broadcaster is already closed; individual send failures are not reported.
+func (b *broadcaster) send(msg *pb.Message, dest *uint64) error {
+	select {
+	case <-b.closedCh:
+		return fmt.Errorf("broadcaster closed")
+	default:
+	}
+
+	var destCount int
+	var required int
+	if dest != nil {
+		destCount = 1
+		required = 1
+	} else {
+		destCount = len(b.msgChans)
+		required = destCount - b.f
+	}
+
+	// Buffered to destCount so every outcome can be delivered without a reader
+	wait := make(chan bool, destCount)
+
+	if dest != nil {
+		b.closed.Add(1)
+		b.unicastOne(msg, *dest, wait)
+	} else {
+		b.closed.Add(len(b.msgChans))
+		for i := range b.msgChans {
+			b.unicastOne(msg, i, wait)
+		}
+	}
+
+	succeeded := 0
+	timer := time.NewTimer(time.Second) // TODO, make this configurable
+
+	// This loop will try to send, until one of:
+	// a) the required number of sends succeed
+	// b) all sends complete regardless of success
+	// c) the timeout expires and the required number of sends have returned
+outer:
+	for i := 0; i < destCount; i++ {
+		select {
+		case success := <-wait:
+			if success {
+				succeeded++
+				if succeeded >= required {
+					break outer
+				}
+			}
+		case <-timer.C:
+			// Timed out: wait for up to `required` total outcomes (the
+			// buffered channel guarantees these receives cannot block
+			// forever), then give up. Note the deliberately shadowed i.
+			for i := i; i < required; i++ {
+				<-wait
+			}
+			break outer
+		}
+	}
+
+	return nil
+}
+
+// Unicast queues msg for delivery to the single replica dest.
+func (b *broadcaster) Unicast(msg *pb.Message, dest uint64) error {
+	return b.send(msg, &dest)
+}
+
+// Broadcast queues msg for delivery to every other replica in the network.
+func (b *broadcaster) Broadcast(msg *pb.Message) error {
+	return b.send(msg, nil)
+}
diff --git a/consensus/pbft/broadcast_test.go b/consensus/pbft/broadcast_test.go
new file mode 100644
index 00000000000..f30e3a97ea2
--- /dev/null
+++ b/consensus/pbft/broadcast_test.go
@@ -0,0 +1,274 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// mockMsg records a single delivered message and its destination.
+type mockMsg struct {
+	msg *pb.Message
+	dest *pb.PeerID
+}
+
+// mockComm is a minimal communicator stub: Unicast forwards each message to
+// msgCh for the test to observe; membership is n peers named vp0..vp{n-1}.
+type mockComm struct {
+	self uint64
+	n uint64
+	msgCh chan mockMsg
+}
+
+func (m *mockComm) Unicast(msg *pb.Message, dest *pb.PeerID) error {
+	m.msgCh <- mockMsg{msg, dest}
+	return nil
+}
+
+// Broadcast is unused by the broadcaster under test; it is a no-op.
+func (m *mockComm) Broadcast(msg *pb.Message, t pb.PeerEndpoint_Type) error {
+	return nil
+}
+
+func (m *mockComm) GetNetworkInfo() (*pb.PeerEndpoint, []*pb.PeerEndpoint, error) {
+	return nil, nil, nil
+}
+
+// GetNetworkHandles returns self's handle plus the full list vp0..vp{n-1}.
+func (m *mockComm) GetNetworkHandles() (*pb.PeerID, []*pb.PeerID, error) {
+	var h []*pb.PeerID
+	for n := uint64(0); n < m.n; n++ {
+		h = append(h, &pb.PeerID{Name: fmt.Sprintf("vp%d", n)})
+	}
+	return h[m.self], h, nil
+}
+
+// TestBroadcast checks that a broadcast reaches (at least a quorum of) the
+// other peers exactly once each.
+func TestBroadcast(t *testing.T) {
+	m := &mockComm{
+		self: 1,
+		n: 4,
+		msgCh: make(chan mockMsg, 4),
+	}
+	sent := make(map[string]int)
+	// NOTE(review): this collector goroutine is never joined and msgCh is
+	// never closed; reading `sent` below relies on the sleep + Close having
+	// let the final increment land — consider explicit synchronization.
+	go func() {
+		for msg := range m.msgCh {
+			sent[msg.dest.Name]++
+		}
+	}()
+
+	b := newBroadcaster(1, 4, 1, m)
+
+	msg := &pb.Message{Payload: []byte("hi")}
+	b.Broadcast(msg)
+	time.Sleep(100 * time.Millisecond)
+	b.Close()
+
+	sentCount := 0
+	for _, q := range sent {
+		if q == 1 {
+			sentCount++
+		}
+	}
+
+	// n=4, f=1: require at least n-1-f = 2 distinct successful deliveries
+	if sentCount < 2 {
+		t.Errorf("broadcast did not send to all peers: %v", sent)
+	}
+}
+
+// mockStuckComm behaves like mockComm except that any send to vp0 blocks
+// until a 2 second timeout or until done is closed, simulating a stuck peer.
+type mockStuckComm struct {
+	mockComm
+	done chan struct{}
+}
+
+func (m *mockStuckComm) Unicast(msg *pb.Message, dest *pb.PeerID) error {
+	ret := m.mockComm.Unicast(msg, dest)
+	if dest.Name == "vp0" {
+		select {
+		case <-time.After(2 * time.Second):
+			return fmt.Errorf("timeout")
+		case <-m.done:
+			return fmt.Errorf("closed")
+		}
+	}
+	return ret
+}
+
+// TestBroadcastStuck checks that broadcasts still complete to a quorum when
+// one peer (vp0) is stuck: no Broadcast call may block the caller, and each
+// message must still reach at least two peers.
+func TestBroadcastStuck(t *testing.T) {
+	m := &mockStuckComm{
+		mockComm: mockComm{
+			self: 1,
+			n: 4,
+			msgCh: make(chan mockMsg),
+		},
+		done: make(chan struct{}),
+	}
+	sent := make(map[string][]string)
+	go func() {
+		for msg := range m.msgCh {
+			key := string(msg.msg.Payload)
+			sent[key] = append(sent[key], msg.dest.Name)
+		}
+	}()
+
+	b := newBroadcaster(1, 4, 1, m)
+
+	maxc := 20
+	for c := 0; c < maxc; c++ {
+		b.Broadcast(&pb.Message{Payload: []byte(fmt.Sprintf("%d", c))})
+	}
+
+	done := make(chan struct{})
+	go func() {
+		select {
+		case <-done:
+			return
+		case <-time.After(time.Second):
+			// t.Fatal/FailNow must only be called from the goroutine running
+			// the test function; from this watchdog goroutine use t.Error.
+			t.Error("blocked")
+		}
+	}()
+	time.Sleep(100 * time.Millisecond)
+	close(m.done)
+	b.Close()
+	close(done)
+
+	sendDone := 0
+	for _, q := range sent {
+		if len(q) >= 2 {
+			sendDone++
+		}
+	}
+	if sendDone != maxc {
+		t.Errorf("expected %d sent messages: %v", maxc, sent)
+	}
+}
+
+// TestBroadcastUnicast checks that Unicast delivers exactly one message to
+// exactly one peer.
+func TestBroadcastUnicast(t *testing.T) {
+	m := &mockComm{
+		self: 1,
+		n: 4,
+		msgCh: make(chan mockMsg, 4),
+	}
+	sent := make(map[string]int)
+	// NOTE(review): collector goroutine is never joined (see TestBroadcast)
+	go func() {
+		for msg := range m.msgCh {
+			sent[msg.dest.Name]++
+		}
+	}()
+
+	b := newBroadcaster(1, 4, 1, m)
+
+	msg := &pb.Message{Payload: []byte("hi")}
+	b.Unicast(msg, 0)
+	time.Sleep(100 * time.Millisecond)
+	b.Close()
+
+	sentCount := 0
+	for _, q := range sent {
+		if q == 1 {
+			sentCount++
+		}
+	}
+
+	if sentCount != 1 {
+		t.Errorf("broadcast did not send to dest peer: %v", sent)
+	}
+}
+
+// mockFailComm fails every single send, for testing that the broadcaster
+// still terminates cleanly when no peer is reachable.
+type mockFailComm struct {
+	mockComm
+	done chan struct{}
+}
+
+func (m *mockFailComm) Unicast(msg *pb.Message, dest *pb.PeerID) error {
+	return fmt.Errorf("always fails on purpose")
+}
+
+// TestBroadcastAllFail checks that Close returns promptly even when every
+// send attempt fails.
+func TestBroadcastAllFail(t *testing.T) {
+	m := &mockFailComm{
+		mockComm: mockComm{
+			self: 1,
+			n: 4,
+			msgCh: make(chan mockMsg),
+		},
+		done: make(chan struct{}),
+	}
+
+	b := newBroadcaster(1, 4, 1, m)
+
+	maxc := 20
+	for c := 0; c < maxc; c++ {
+		b.Broadcast(&pb.Message{Payload: []byte(fmt.Sprintf("%d", c))})
+	}
+
+	done := make(chan struct{})
+	go func() {
+		close(m.done)
+		b.Close() // If the broadcasts are still trying (despite all the failures), this call blocks until the timeout
+		close(done)
+	}()
+
+	select {
+	case <-done:
+		return
+	case <-time.After(time.Second):
+		t.Fatal("Could not successfully close broadcaster, after 1 second")
+	}
+}
+
+// mockIndefinitelyStuckComm fails every send and additionally blocks sends
+// to vp0 until done is closed, simulating a permanently wedged peer.
+type mockIndefinitelyStuckComm struct {
+	mockComm
+	done chan struct{}
+}
+
+func (m *mockIndefinitelyStuckComm) Unicast(msg *pb.Message, dest *pb.PeerID) error {
+	if dest.Name == "vp0" {
+		<-m.done
+	}
+	return fmt.Errorf("Always failing, on purpose, with vp0 stuck")
+}
+
+// TestBroadcastIndefinitelyStuck checks that repeated broadcasts do not
+// block the caller indefinitely even when one peer never returns: the
+// stuck peer's queue fills and further messages to it are dropped.
+func TestBroadcastIndefinitelyStuck(t *testing.T) {
+	m := &mockIndefinitelyStuckComm{
+		mockComm: mockComm{
+			self: 1,
+			n: 4,
+			msgCh: make(chan mockMsg),
+		},
+		done: make(chan struct{}),
+	}
+
+	b := newBroadcaster(1, 4, 1, m)
+
+	broadcastDone := make(chan struct{})
+
+	go func() {
+		maxc := 3
+		for c := 0; c < maxc; c++ {
+			b.Broadcast(&pb.Message{Payload: []byte(fmt.Sprintf("%d", c))})
+		}
+		close(broadcastDone)
+	}()
+
+	select {
+	case <-broadcastDone:
+		// Success
+	case <-time.After(10 * time.Second):
+		t.Errorf("Got blocked for too long")
+	}
+
+	close(m.done)
+	b.Close()
+}
diff --git a/consensus/pbft/config.yaml b/consensus/pbft/config.yaml
new file mode 100644
index 00000000000..ba74553e9f0
--- /dev/null
+++ b/consensus/pbft/config.yaml
@@ -0,0 +1,73 @@
+---
+################################################################################
+#
+# PBFT PROPERTIES
+#
+# - List all algorithm-specific properties here.
+# - Nest keys where appropriate, and sort alphabetically for easier parsing.
+#
+################################################################################
+general:
+
+    # Operational mode: currently only batch (this value is case-insensitive)
+ mode: batch
+
+ # Maximum number of validators/replicas we expect in the network
+ # Keep the "N" in quotes, or it will be interpreted as "false".
+ "N": 4
+
+ # Number of byzantine nodes we will tolerate
+ f: 1
+
+ # Checkpoint period is the maximum number of pbft requests that must be
+ # re-processed in a view change. A smaller checkpoint period will decrease
+ # the amount of time required to recover from an error, but will decrease
+ # overall throughput in normal case operation.
+ K: 10
+
+ # Affects the receive log size which is K * logmultiplier
+ # The primary will only send sequence numbers which fall within K * logmultiplier/2 of
+ # its high watermark, so this cannot be set to less than 2
+ # For high volume/high latency environments, a higher log size may increase throughput
+ logmultiplier: 4
+
+ # How many requests should the primary send per pre-prepare when in "batch" mode
+ batchsize: 500
+
+ # Whether the replica should act as a byzantine one; useful for debugging on testnets
+ byzantine: false
+
+ # After how many checkpoint periods the primary gets cycled automatically. Set to 0 to disable.
+ viewchangeperiod: 0
+
+ # Timeouts
+ timeout:
+
+ # Send a pre-prepare if there are pending requests, batchsize isn't reached yet,
+ # and this much time has elapsed since the current batch was formed
+ batch: 1s
+
+ # How long may a request take between reception and execution, must be greater than the batch timeout
+ request: 2s
+
+ # How long may a view change take
+ viewchange: 2s
+
+ # How long to wait for a view change quorum before resending (the same) view change
+ resendviewchange: 2s
+
+ # Interval to send "keep-alive" null requests. Set to 0 to disable. If enabled, must be greater than request timeout
+ nullrequest: 0s
+
+################################################################################
+#
+# SECTION: EXECUTOR
+#
+# - This section applies to the distinct executor service
+#
+################################################################################
+executor:
+
+ # The queue size for execution requests, ordering proceeds and queues execution
+ # requests. This value should always exceed the pbft log size
+ queuesize: 30
diff --git a/consensus/pbft/deduplicator.go b/consensus/pbft/deduplicator.go
new file mode 100644
index 00000000000..1771c03c8e5
--- /dev/null
+++ b/consensus/pbft/deduplicator.go
@@ -0,0 +1,72 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "time"
+)
+
+// deduplicator maintains the most recent Request timestamp for each
+// replica. Two timestamps are maintained per replica. One timestamp
+// tracks the most recent Request received from a replica, the other
+// timestamp tracks the most recent executed Request.
+type deduplicator struct {
+	reqTimestamps map[uint64]time.Time // latest received Request per replica id
+	execTimestamps map[uint64]time.Time // latest executed Request per replica id
+}
+
+// newDeduplicator creates a deduplicator with empty per-replica
+// received/executed timestamp tables.
+func newDeduplicator() *deduplicator {
+	return &deduplicator{
+		reqTimestamps:  make(map[uint64]time.Time),
+		execTimestamps: make(map[uint64]time.Time),
+	}
+}
+
+// Request updates the received request timestamp for the submitting
+// replica. If the request is older than any previously received or
+// executed request, Request() will return false, indicating a stale
+// request. Note only the received-timestamp table is updated here; the
+// executed table is updated by Execute().
+func (d *deduplicator) Request(req *Request) bool {
+	reqTime := time.Unix(req.Timestamp.Seconds, int64(req.Timestamp.Nanos))
+	// Zero time (replica never seen) compares older than any real timestamp
+	if !reqTime.After(d.reqTimestamps[req.ReplicaId]) ||
+		!reqTime.After(d.execTimestamps[req.ReplicaId]) {
+		return false
+	}
+	d.reqTimestamps[req.ReplicaId] = reqTime
+	return true
+}
+
+// Execute updates the executed request timestamp for the submitting
+// replica. If the request is older than any previously executed
+// request from the same replica, Execute() will return false,
+// indicating a stale request.
+func (d *deduplicator) Execute(req *Request) bool {
+	reqTime := time.Unix(req.Timestamp.Seconds, int64(req.Timestamp.Nanos))
+	if !reqTime.After(d.execTimestamps[req.ReplicaId]) {
+		return false
+	}
+	d.execTimestamps[req.ReplicaId] = reqTime
+	return true
+}
+
+// IsNew returns true if this Request is newer than any previously
+// executed request of the submitting replica. Unlike Execute(), it does
+// not mutate any state.
+func (d *deduplicator) IsNew(req *Request) bool {
+	reqTime := time.Unix(req.Timestamp.Seconds, int64(req.Timestamp.Nanos))
+	return reqTime.After(d.execTimestamps[req.ReplicaId])
+}
diff --git a/consensus/pbft/external.go b/consensus/pbft/external.go
new file mode 100644
index 00000000000..b6880638f9e
--- /dev/null
+++ b/consensus/pbft/external.go
@@ -0,0 +1,87 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "github.com/hyperledger/fabric/consensus/util/events"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// --------------------------------------------------------------
+//
+// external contains all of the functions which
+// are intended to be called from outside of the pbft package
+//
+// --------------------------------------------------------------
+
+// Event types
+
+// stateUpdatedEvent is sent when state transfer completes
+type stateUpdatedEvent struct {
+	chkpt *checkpointMessage // the checkpoint the transfer was targeting
+	target *pb.BlockchainInfo // the blockchain state actually reached
+}
+
+// executedEvent is sent when a requested execution completes
+type executedEvent struct {
+	tag interface{}
+}
+
+// committedEvent is sent when a requested commit completes
+type committedEvent struct {
+	tag interface{}
+	target *pb.BlockchainInfo
+}
+
+// rolledBackEvent is sent when a requested rollback completes
+type rolledBackEvent struct{}
+
+// externalEventReceiver adapts callbacks from the stack into events queued
+// on the events manager, so all pbft processing is serialized on one loop.
+type externalEventReceiver struct {
+	manager events.Manager
+}
+
+// RecvMsg is called by the stack when a new message is received; the message
+// is queued for asynchronous processing and nil is always returned.
+func (eer *externalEventReceiver) RecvMsg(ocMsg *pb.Message, senderHandle *pb.PeerID) error {
+	eer.manager.Queue() <- batchMessageEvent{
+		msg: ocMsg,
+		sender: senderHandle,
+	}
+	return nil
+}
+
+// Executed is called by the stack whenever a requested execution completes;
+// the tag identifying the request is handed to the event loop.
+func (eer *externalEventReceiver) Executed(tag interface{}) {
+	eer.manager.Queue() <- executedEvent{tag}
+}
+
+// Committed is called by the stack whenever a requested commit completes;
+// the tag and resulting blockchain info are handed to the event loop.
+func (eer *externalEventReceiver) Committed(tag interface{}, target *pb.BlockchainInfo) {
+	eer.manager.Queue() <- committedEvent{tag, target}
+}
+
+// RolledBack is called by the stack whenever a requested rollback completes;
+// the tag is discarded, only the fact of completion is queued.
+func (eer *externalEventReceiver) RolledBack(tag interface{}) {
+	eer.manager.Queue() <- rolledBackEvent{}
+}
+
+// StateUpdated is a signal from the stack that it has fast-forwarded its state.
+// NOTE(review): tag is asserted to *checkpointMessage without a check and
+// will panic on any other type — presumably the stack echoes back the tag
+// supplied with the transfer request; confirm against the caller.
+func (eer *externalEventReceiver) StateUpdated(tag interface{}, target *pb.BlockchainInfo) {
+	eer.manager.Queue() <- stateUpdatedEvent{
+		chkpt: tag.(*checkpointMessage),
+		target: target,
+	}
+}
diff --git a/consensus/pbft/fuzz_test.go b/consensus/pbft/fuzz_test.go
new file mode 100644
index 00000000000..1d35050591e
--- /dev/null
+++ b/consensus/pbft/fuzz_test.go
@@ -0,0 +1,326 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "math/rand"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/google/gofuzz"
+ "github.com/op/go-logging"
+
+ "fmt"
+)
+
+// newFuzzMock builds an omniProto stub whose crypto operations trivially
+// succeed and whose network operations are no-ops, so fuzzed messages are
+// exercised purely by the pbft state machine.
+func newFuzzMock() *omniProto {
+	return &omniProto{
+		broadcastImpl: func(msgPayload []byte) {
+			// No-op
+		},
+		verifyImpl: func(senderID uint64, signature []byte, message []byte) error {
+			return nil
+		},
+		signImpl: func(msg []byte) ([]byte, error) {
+			return msg, nil
+		},
+		viewChangeImpl: func(curView uint64) {
+		},
+		validateStateImpl: func() {},
+		invalidateStateImpl: func() {},
+	}
+}
+
+// TestFuzz throws 30 randomly generated pbft messages at a primary and a
+// backup replica; the pass criterion is simply that neither replica panics.
+func TestFuzz(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping fuzz test")
+	}
+
+	logging.SetBackend(logging.InitForTesting(logging.ERROR))
+
+	mock := newFuzzMock()
+	primary, pmanager := createRunningPbftWithManager(0, loadConfig(), mock)
+	defer primary.close()
+	defer pmanager.Halt()
+	mock = newFuzzMock()
+	backup, bmanager := createRunningPbftWithManager(1, loadConfig(), mock)
+	defer backup.close()
+	defer bmanager.Halt()
+
+	f := fuzz.New()
+
+	for i := 0; i < 30; i++ {
+		msg := &Message{}
+		f.Fuzz(msg)
+		// roundtrip through protobufs to translate
+		// nil slices into empty slices
+		raw, _ := proto.Marshal(msg)
+		proto.Unmarshal(raw, msg)
+
+		// Attribute the message to the replica id embedded in its payload so
+		// sender checks in the receive path are satisfied
+		var senderID uint64
+		if reqBatch := msg.GetRequestBatch(); reqBatch != nil {
+			senderID = primary.id // doesn't matter, not checked
+		} else if preprep := msg.GetPrePrepare(); preprep != nil {
+			senderID = preprep.ReplicaId
+		} else if prep := msg.GetPrepare(); prep != nil {
+			senderID = prep.ReplicaId
+		} else if commit := msg.GetCommit(); commit != nil {
+			senderID = commit.ReplicaId
+		} else if chkpt := msg.GetCheckpoint(); chkpt != nil {
+			senderID = chkpt.ReplicaId
+		} else if vc := msg.GetViewChange(); vc != nil {
+			senderID = vc.ReplicaId
+		} else if nv := msg.GetNewView(); nv != nil {
+			senderID = nv.ReplicaId
+		}
+
+		pmanager.Queue() <- &pbftMessageEvent{msg: msg, sender: senderID}
+		bmanager.Queue() <- &pbftMessageEvent{msg: msg, sender: senderID}
+	}
+
+	logging.Reset()
+}
+
+// Fuzz implements gofuzz's custom-fuzzer hook for Message: it picks one of
+// the seven oneof payload variants uniformly and fuzzes that variant.
+func (msg *Message) Fuzz(c fuzz.Continue) {
+	switch c.RandUint64() % 7 {
+	case 0:
+		m := &Message_RequestBatch{}
+		c.Fuzz(m)
+		msg.Payload = m
+	case 1:
+		m := &Message_PrePrepare{}
+		c.Fuzz(m)
+		msg.Payload = m
+	case 2:
+		m := &Message_Prepare{}
+		c.Fuzz(m)
+		msg.Payload = m
+	case 3:
+		m := &Message_Commit{}
+		c.Fuzz(m)
+		msg.Payload = m
+	case 4:
+		m := &Message_Checkpoint{}
+		c.Fuzz(m)
+		msg.Payload = m
+	case 5:
+		m := &Message_ViewChange{}
+		c.Fuzz(m)
+		msg.Payload = m
+	case 6:
+		m := &Message_NewView{}
+		c.Fuzz(m)
+		msg.Payload = m
+	}
+}
+
+func TestMinimalFuzz(t *testing.T) {
+ var err error
+ if testing.Short() {
+ t.Skip("Skipping fuzz test")
+ }
+
+ validatorCount := 4
+ net := makePBFTNetwork(validatorCount, nil)
+ defer net.stop()
+ fuzzer := &protoFuzzer{r: rand.New(rand.NewSource(0))}
+ net.filterFn = fuzzer.fuzzPacket
+
+ noExec := 0
+ for reqID := 1; reqID < 30; reqID++ {
+ if reqID%3 == 0 {
+ fuzzer.fuzzNode = fuzzer.r.Intn(len(net.endpoints))
+ fmt.Printf("Fuzzing node %d\n", fuzzer.fuzzNode)
+ }
+
+ sender := uint64(generateBroadcaster(validatorCount))
+ reqBatchMsg := createPbftReqBatchMsg(int64(reqID), sender)
+ for _, ep := range net.endpoints {
+ ep.(*pbftEndpoint).manager.Queue() <- &pbftMessageEvent{msg: reqBatchMsg, sender: sender}
+ }
+ if err != nil {
+ t.Fatalf("Request failed: %s", err)
+ }
+
+ err = net.process()
+ if err != nil {
+ t.Fatalf("Processing failed: %s", err)
+ }
+
+ quorum := 0
+ for _, ep := range net.endpoints {
+ if ep.(*pbftEndpoint).sc.executions > 0 {
+ quorum++
+ ep.(*pbftEndpoint).sc.executions = 0
+ }
+ }
+ if quorum < len(net.endpoints)/3 {
+ noExec++
+ }
+ if noExec > 1 {
+ noExec = 0
+ for _, ep := range net.endpoints {
+ ep.(*pbftEndpoint).pbft.sendViewChange()
+ }
+ err = net.process()
+ if err != nil {
+ t.Fatalf("Processing failed: %s", err)
+ }
+ }
+ }
+}
+
+// protoFuzzer mutates protobuf packets originating from one designated node
+// (fuzzNode), drawing all randomness from the seeded source r.
+type protoFuzzer struct {
+	fuzzNode int
+	r *rand.Rand
+}
+
+// fuzzPacket mutates broadcast packets (dst == -1) sent by the current fuzz
+// node, leaving all other traffic untouched, and returns the re-marshaled
+// bytes.
+func (f *protoFuzzer) fuzzPacket(src int, dst int, msgOuter []byte) []byte {
+	if dst != -1 || src != f.fuzzNode {
+		return msgOuter
+	}
+
+	// XXX only with some probability
+	msg := &Message{}
+	if proto.Unmarshal(msgOuter, msg) != nil {
+		panic("could not unmarshal")
+	}
+
+	fmt.Printf("Will fuzz %v\n", msg)
+
+	// Mutate whichever payload variant is present (RequestBatch excluded)
+	if m := msg.GetPrePrepare(); m != nil {
+		f.fuzzPayload(m)
+	}
+	if m := msg.GetPrepare(); m != nil {
+		f.fuzzPayload(m)
+	}
+	if m := msg.GetCommit(); m != nil {
+		f.fuzzPayload(m)
+	}
+	if m := msg.GetCheckpoint(); m != nil {
+		f.fuzzPayload(m)
+	}
+	if m := msg.GetViewChange(); m != nil {
+		f.fuzzPayload(m)
+	}
+	if m := msg.GetNewView(); m != nil {
+		f.fuzzPayload(m)
+	}
+
+	newMsg, _ := proto.Marshal(msg)
+	return newMsg
+}
+
+// fuzzPayload reflects over the struct pointed to by s, picks one field at
+// random (excluding ReplicaId, so sender checks still pass) and fuzzes it.
+// NOTE(review): panics via Intn(0) if a message type had only a ReplicaId
+// field — presumably all fuzzed types have more; confirm.
+func (f *protoFuzzer) fuzzPayload(s interface{}) {
+	v := reflect.ValueOf(s).Elem()
+	t := v.Type()
+
+	var elems []reflect.Value
+	var fields []string
+	for i := 0; i < v.NumField(); i++ {
+		if t.Field(i).Name == "ReplicaId" {
+			continue
+		}
+		elems = append(elems, v.Field(i))
+		fields = append(fields, t.Field(i).Name)
+	}
+
+	idx := f.r.Intn(len(elems))
+	elm := elems[idx]
+	fld := fields[idx]
+	fmt.Printf("Fuzzing %s:%v\n", fld, elm)
+	f.Fuzz(elm)
+}
+
+// Fuzz applies a kind-appropriate random mutation to the reflected value v:
+// numeric perturbation, string character scrambling, recursive descent into
+// pointers/structs, and add/remove/mutate for slices. Unsettable values and
+// maps are left unchanged; any other kind panics.
+func (f *protoFuzzer) Fuzz(v reflect.Value) {
+	if !v.CanSet() {
+		return
+	}
+
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		f.FuzzInt(v)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		f.FuzzUint(v)
+	case reflect.String:
+		// Replace with a same-length string of random printable ASCII
+		str := ""
+		for i := 0; i < v.Len(); i++ {
+			str = str + string(' '+rune(f.r.Intn(94)))
+		}
+		v.SetString(str)
+		return
+	case reflect.Ptr:
+		if !v.IsNil() {
+			f.Fuzz(v.Elem())
+		}
+		return
+	case reflect.Slice:
+		mode := f.r.Intn(3)
+		switch {
+		case v.Len() > 0 && mode == 0:
+			// fuzz entry
+			f.Fuzz(v.Index(f.r.Intn(v.Len())))
+		case v.Len() > 0 && mode == 1:
+			// remove entry
+			entry := f.r.Intn(v.Len())
+			pre := v.Slice(0, entry)
+			post := v.Slice(entry+1, v.Len())
+			v.Set(reflect.AppendSlice(pre, post))
+		default:
+			// add entry
+			entry := reflect.MakeSlice(v.Type(), 1, 1)
+			f.Fuzz(entry) // XXX fill all fields
+			v.Set(reflect.AppendSlice(v, entry))
+		}
+		return
+	case reflect.Struct:
+		f.Fuzz(v.Field(f.r.Intn(v.NumField())))
+		return
+	case reflect.Map:
+		// TODO fuzz map
+	default:
+		panic(fmt.Sprintf("Not fuzzing %v %+v", v.Kind(), v))
+	}
+}
+
+// FuzzInt shifts a signed integer value by a small random delta.
+func (f *protoFuzzer) FuzzInt(v reflect.Value) {
+	v.SetInt(v.Int() + f.fuzzyInt())
+}
+
+// FuzzUint shifts an unsigned integer by a small random delta, retrying
+// until the delta would not take the value below zero.
+func (f *protoFuzzer) FuzzUint(v reflect.Value) {
+	val := v.Uint()
+	for {
+		delta := f.fuzzyInt()
+		if delta > 0 || uint64(-delta) < val {
+			v.SetUint(val + uint64(delta))
+			return
+		}
+	}
+}
+
+// fuzzyInt returns a small non-zero Zipf-distributed delta with a random
+// sign. All randomness is drawn from the fuzzer's seeded source f.r so that
+// a given seed reproduces the same fuzzing run (the original sign flip used
+// the package-global rand, which broke that determinism).
+func (f *protoFuzzer) fuzzyInt() int64 {
+	i := int64(rand.NewZipf(f.r, 3, 1, 200).Uint64() + 1)
+	if f.r.Intn(2) == 0 {
+		i = -i
+	}
+	fmt.Printf("Changing int by %d\n", i)
+	return i
+}
+
+// FuzzSlice is an intentionally empty placeholder; slice mutation is handled
+// inline in Fuzz above.
+func (f *protoFuzzer) FuzzSlice(v reflect.Value) {
+}
diff --git a/consensus/pbft/messages.pb.go b/consensus/pbft/messages.pb.go
new file mode 100644
index 00000000000..a7e9d867967
--- /dev/null
+++ b/consensus/pbft/messages.pb.go
@@ -0,0 +1,671 @@
+// Code generated by protoc-gen-go.
+// source: messages.proto
+// DO NOT EDIT!
+
+/*
+Package pbft is a generated protocol buffer package.
+
+It is generated from these files:
+ messages.proto
+
+It has these top-level messages:
+ Message
+ Request
+ PrePrepare
+ Prepare
+ Commit
+ BlockInfo
+ Checkpoint
+ ViewChange
+ PQset
+ NewView
+ FetchRequestBatch
+ RequestBatch
+ BatchMessage
+ Metadata
+*/
+package pbft
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "google/protobuf"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Message struct {
+ // Types that are valid to be assigned to Payload:
+ // *Message_RequestBatch
+ // *Message_PrePrepare
+ // *Message_Prepare
+ // *Message_Commit
+ // *Message_Checkpoint
+ // *Message_ViewChange
+ // *Message_NewView
+ // *Message_FetchRequestBatch
+ // *Message_ReturnRequestBatch
+ Payload isMessage_Payload `protobuf_oneof:"payload"`
+}
+
+func (m *Message) Reset() { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage() {}
+
+type isMessage_Payload interface {
+ isMessage_Payload()
+}
+
+type Message_RequestBatch struct {
+ RequestBatch *RequestBatch `protobuf:"bytes,1,opt,name=request_batch,oneof"`
+}
+type Message_PrePrepare struct {
+ PrePrepare *PrePrepare `protobuf:"bytes,2,opt,name=pre_prepare,oneof"`
+}
+type Message_Prepare struct {
+ Prepare *Prepare `protobuf:"bytes,3,opt,name=prepare,oneof"`
+}
+type Message_Commit struct {
+ Commit *Commit `protobuf:"bytes,4,opt,name=commit,oneof"`
+}
+type Message_Checkpoint struct {
+ Checkpoint *Checkpoint `protobuf:"bytes,5,opt,name=checkpoint,oneof"`
+}
+type Message_ViewChange struct {
+ ViewChange *ViewChange `protobuf:"bytes,6,opt,name=view_change,oneof"`
+}
+type Message_NewView struct {
+ NewView *NewView `protobuf:"bytes,7,opt,name=new_view,oneof"`
+}
+type Message_FetchRequestBatch struct {
+ FetchRequestBatch *FetchRequestBatch `protobuf:"bytes,8,opt,name=fetch_request_batch,oneof"`
+}
+type Message_ReturnRequestBatch struct {
+ ReturnRequestBatch *RequestBatch `protobuf:"bytes,9,opt,name=return_request_batch,oneof"`
+}
+
+func (*Message_RequestBatch) isMessage_Payload() {}
+func (*Message_PrePrepare) isMessage_Payload() {}
+func (*Message_Prepare) isMessage_Payload() {}
+func (*Message_Commit) isMessage_Payload() {}
+func (*Message_Checkpoint) isMessage_Payload() {}
+func (*Message_ViewChange) isMessage_Payload() {}
+func (*Message_NewView) isMessage_Payload() {}
+func (*Message_FetchRequestBatch) isMessage_Payload() {}
+func (*Message_ReturnRequestBatch) isMessage_Payload() {}
+
+func (m *Message) GetPayload() isMessage_Payload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *Message) GetRequestBatch() *RequestBatch {
+ if x, ok := m.GetPayload().(*Message_RequestBatch); ok {
+ return x.RequestBatch
+ }
+ return nil
+}
+
+func (m *Message) GetPrePrepare() *PrePrepare {
+ if x, ok := m.GetPayload().(*Message_PrePrepare); ok {
+ return x.PrePrepare
+ }
+ return nil
+}
+
+func (m *Message) GetPrepare() *Prepare {
+ if x, ok := m.GetPayload().(*Message_Prepare); ok {
+ return x.Prepare
+ }
+ return nil
+}
+
+func (m *Message) GetCommit() *Commit {
+ if x, ok := m.GetPayload().(*Message_Commit); ok {
+ return x.Commit
+ }
+ return nil
+}
+
+func (m *Message) GetCheckpoint() *Checkpoint {
+ if x, ok := m.GetPayload().(*Message_Checkpoint); ok {
+ return x.Checkpoint
+ }
+ return nil
+}
+
+func (m *Message) GetViewChange() *ViewChange {
+ if x, ok := m.GetPayload().(*Message_ViewChange); ok {
+ return x.ViewChange
+ }
+ return nil
+}
+
+func (m *Message) GetNewView() *NewView {
+ if x, ok := m.GetPayload().(*Message_NewView); ok {
+ return x.NewView
+ }
+ return nil
+}
+
+func (m *Message) GetFetchRequestBatch() *FetchRequestBatch {
+ if x, ok := m.GetPayload().(*Message_FetchRequestBatch); ok {
+ return x.FetchRequestBatch
+ }
+ return nil
+}
+
+func (m *Message) GetReturnRequestBatch() *RequestBatch {
+ if x, ok := m.GetPayload().(*Message_ReturnRequestBatch); ok {
+ return x.ReturnRequestBatch
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Message) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) {
+ return _Message_OneofMarshaler, _Message_OneofUnmarshaler, []interface{}{
+ (*Message_RequestBatch)(nil),
+ (*Message_PrePrepare)(nil),
+ (*Message_Prepare)(nil),
+ (*Message_Commit)(nil),
+ (*Message_Checkpoint)(nil),
+ (*Message_ViewChange)(nil),
+ (*Message_NewView)(nil),
+ (*Message_FetchRequestBatch)(nil),
+ (*Message_ReturnRequestBatch)(nil),
+ }
+}
+
+func _Message_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Message)
+ // payload
+ switch x := m.Payload.(type) {
+ case *Message_RequestBatch:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.RequestBatch); err != nil {
+ return err
+ }
+ case *Message_PrePrepare:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.PrePrepare); err != nil {
+ return err
+ }
+ case *Message_Prepare:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Prepare); err != nil {
+ return err
+ }
+ case *Message_Commit:
+ b.EncodeVarint(4<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Commit); err != nil {
+ return err
+ }
+ case *Message_Checkpoint:
+ b.EncodeVarint(5<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Checkpoint); err != nil {
+ return err
+ }
+ case *Message_ViewChange:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ViewChange); err != nil {
+ return err
+ }
+ case *Message_NewView:
+ b.EncodeVarint(7<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.NewView); err != nil {
+ return err
+ }
+ case *Message_FetchRequestBatch:
+ b.EncodeVarint(8<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.FetchRequestBatch); err != nil {
+ return err
+ }
+ case *Message_ReturnRequestBatch:
+ b.EncodeVarint(9<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ReturnRequestBatch); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Message.Payload has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Message_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Message)
+ switch tag {
+ case 1: // payload.request_batch
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(RequestBatch)
+ err := b.DecodeMessage(msg)
+ m.Payload = &Message_RequestBatch{msg}
+ return true, err
+ case 2: // payload.pre_prepare
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(PrePrepare)
+ err := b.DecodeMessage(msg)
+ m.Payload = &Message_PrePrepare{msg}
+ return true, err
+ case 3: // payload.prepare
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Prepare)
+ err := b.DecodeMessage(msg)
+ m.Payload = &Message_Prepare{msg}
+ return true, err
+ case 4: // payload.commit
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Commit)
+ err := b.DecodeMessage(msg)
+ m.Payload = &Message_Commit{msg}
+ return true, err
+ case 5: // payload.checkpoint
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Checkpoint)
+ err := b.DecodeMessage(msg)
+ m.Payload = &Message_Checkpoint{msg}
+ return true, err
+ case 6: // payload.view_change
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ViewChange)
+ err := b.DecodeMessage(msg)
+ m.Payload = &Message_ViewChange{msg}
+ return true, err
+ case 7: // payload.new_view
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(NewView)
+ err := b.DecodeMessage(msg)
+ m.Payload = &Message_NewView{msg}
+ return true, err
+ case 8: // payload.fetch_request_batch
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(FetchRequestBatch)
+ err := b.DecodeMessage(msg)
+ m.Payload = &Message_FetchRequestBatch{msg}
+ return true, err
+ case 9: // payload.return_request_batch
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(RequestBatch)
+ err := b.DecodeMessage(msg)
+ m.Payload = &Message_ReturnRequestBatch{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+type Request struct {
+ Timestamp *google_protobuf.Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
+ Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"`
+ ReplicaId uint64 `protobuf:"varint,3,opt,name=replica_id" json:"replica_id,omitempty"`
+ Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+
+func (m *Request) GetTimestamp() *google_protobuf.Timestamp {
+ if m != nil {
+ return m.Timestamp
+ }
+ return nil
+}
+
+type PrePrepare struct {
+ View uint64 `protobuf:"varint,1,opt,name=view" json:"view,omitempty"`
+ SequenceNumber uint64 `protobuf:"varint,2,opt,name=sequence_number" json:"sequence_number,omitempty"`
+ BatchDigest string `protobuf:"bytes,3,opt,name=batch_digest" json:"batch_digest,omitempty"`
+ RequestBatch *RequestBatch `protobuf:"bytes,4,opt,name=request_batch" json:"request_batch,omitempty"`
+ ReplicaId uint64 `protobuf:"varint,5,opt,name=replica_id" json:"replica_id,omitempty"`
+}
+
+func (m *PrePrepare) Reset() { *m = PrePrepare{} }
+func (m *PrePrepare) String() string { return proto.CompactTextString(m) }
+func (*PrePrepare) ProtoMessage() {}
+
+func (m *PrePrepare) GetRequestBatch() *RequestBatch {
+ if m != nil {
+ return m.RequestBatch
+ }
+ return nil
+}
+
+type Prepare struct {
+ View uint64 `protobuf:"varint,1,opt,name=view" json:"view,omitempty"`
+ SequenceNumber uint64 `protobuf:"varint,2,opt,name=sequence_number" json:"sequence_number,omitempty"`
+ BatchDigest string `protobuf:"bytes,3,opt,name=batch_digest" json:"batch_digest,omitempty"`
+ ReplicaId uint64 `protobuf:"varint,4,opt,name=replica_id" json:"replica_id,omitempty"`
+}
+
+func (m *Prepare) Reset() { *m = Prepare{} }
+func (m *Prepare) String() string { return proto.CompactTextString(m) }
+func (*Prepare) ProtoMessage() {}
+
+type Commit struct {
+ View uint64 `protobuf:"varint,1,opt,name=view" json:"view,omitempty"`
+ SequenceNumber uint64 `protobuf:"varint,2,opt,name=sequence_number" json:"sequence_number,omitempty"`
+ BatchDigest string `protobuf:"bytes,3,opt,name=batch_digest" json:"batch_digest,omitempty"`
+ ReplicaId uint64 `protobuf:"varint,4,opt,name=replica_id" json:"replica_id,omitempty"`
+}
+
+func (m *Commit) Reset() { *m = Commit{} }
+func (m *Commit) String() string { return proto.CompactTextString(m) }
+func (*Commit) ProtoMessage() {}
+
+type BlockInfo struct {
+ BlockNumber uint64 `protobuf:"varint,1,opt,name=block_number" json:"block_number,omitempty"`
+ BlockHash []byte `protobuf:"bytes,2,opt,name=block_hash,proto3" json:"block_hash,omitempty"`
+}
+
+func (m *BlockInfo) Reset() { *m = BlockInfo{} }
+func (m *BlockInfo) String() string { return proto.CompactTextString(m) }
+func (*BlockInfo) ProtoMessage() {}
+
+type Checkpoint struct {
+ SequenceNumber uint64 `protobuf:"varint,1,opt,name=sequence_number" json:"sequence_number,omitempty"`
+ ReplicaId uint64 `protobuf:"varint,2,opt,name=replica_id" json:"replica_id,omitempty"`
+ Id string `protobuf:"bytes,3,opt,name=id" json:"id,omitempty"`
+}
+
+func (m *Checkpoint) Reset() { *m = Checkpoint{} }
+func (m *Checkpoint) String() string { return proto.CompactTextString(m) }
+func (*Checkpoint) ProtoMessage() {}
+
+type ViewChange struct {
+ View uint64 `protobuf:"varint,1,opt,name=view" json:"view,omitempty"`
+ H uint64 `protobuf:"varint,2,opt,name=h" json:"h,omitempty"`
+ Cset []*ViewChange_C `protobuf:"bytes,3,rep,name=cset" json:"cset,omitempty"`
+ Pset []*ViewChange_PQ `protobuf:"bytes,4,rep,name=pset" json:"pset,omitempty"`
+ Qset []*ViewChange_PQ `protobuf:"bytes,5,rep,name=qset" json:"qset,omitempty"`
+ ReplicaId uint64 `protobuf:"varint,6,opt,name=replica_id" json:"replica_id,omitempty"`
+ Signature []byte `protobuf:"bytes,7,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (m *ViewChange) Reset() { *m = ViewChange{} }
+func (m *ViewChange) String() string { return proto.CompactTextString(m) }
+func (*ViewChange) ProtoMessage() {}
+
+func (m *ViewChange) GetCset() []*ViewChange_C {
+ if m != nil {
+ return m.Cset
+ }
+ return nil
+}
+
+func (m *ViewChange) GetPset() []*ViewChange_PQ {
+ if m != nil {
+ return m.Pset
+ }
+ return nil
+}
+
+func (m *ViewChange) GetQset() []*ViewChange_PQ {
+ if m != nil {
+ return m.Qset
+ }
+ return nil
+}
+
+// This message should go away and become a checkpoint once replica_id is removed
+type ViewChange_C struct {
+ SequenceNumber uint64 `protobuf:"varint,1,opt,name=sequence_number" json:"sequence_number,omitempty"`
+ Id string `protobuf:"bytes,3,opt,name=id" json:"id,omitempty"`
+}
+
+func (m *ViewChange_C) Reset() { *m = ViewChange_C{} }
+func (m *ViewChange_C) String() string { return proto.CompactTextString(m) }
+func (*ViewChange_C) ProtoMessage() {}
+
+type ViewChange_PQ struct {
+ SequenceNumber uint64 `protobuf:"varint,1,opt,name=sequence_number" json:"sequence_number,omitempty"`
+ BatchDigest string `protobuf:"bytes,2,opt,name=batch_digest" json:"batch_digest,omitempty"`
+ View uint64 `protobuf:"varint,3,opt,name=view" json:"view,omitempty"`
+}
+
+func (m *ViewChange_PQ) Reset() { *m = ViewChange_PQ{} }
+func (m *ViewChange_PQ) String() string { return proto.CompactTextString(m) }
+func (*ViewChange_PQ) ProtoMessage() {}
+
+type PQset struct {
+ Set []*ViewChange_PQ `protobuf:"bytes,1,rep,name=set" json:"set,omitempty"`
+}
+
+func (m *PQset) Reset() { *m = PQset{} }
+func (m *PQset) String() string { return proto.CompactTextString(m) }
+func (*PQset) ProtoMessage() {}
+
+func (m *PQset) GetSet() []*ViewChange_PQ {
+ if m != nil {
+ return m.Set
+ }
+ return nil
+}
+
+type NewView struct {
+ View uint64 `protobuf:"varint,1,opt,name=view" json:"view,omitempty"`
+ Vset []*ViewChange `protobuf:"bytes,2,rep,name=vset" json:"vset,omitempty"`
+ Xset map[uint64]string `protobuf:"bytes,3,rep,name=xset" json:"xset,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ ReplicaId uint64 `protobuf:"varint,4,opt,name=replica_id" json:"replica_id,omitempty"`
+}
+
+func (m *NewView) Reset() { *m = NewView{} }
+func (m *NewView) String() string { return proto.CompactTextString(m) }
+func (*NewView) ProtoMessage() {}
+
+func (m *NewView) GetVset() []*ViewChange {
+ if m != nil {
+ return m.Vset
+ }
+ return nil
+}
+
+func (m *NewView) GetXset() map[uint64]string {
+ if m != nil {
+ return m.Xset
+ }
+ return nil
+}
+
+type FetchRequestBatch struct {
+ BatchDigest string `protobuf:"bytes,1,opt,name=batch_digest" json:"batch_digest,omitempty"`
+ ReplicaId uint64 `protobuf:"varint,2,opt,name=replica_id" json:"replica_id,omitempty"`
+}
+
+func (m *FetchRequestBatch) Reset() { *m = FetchRequestBatch{} }
+func (m *FetchRequestBatch) String() string { return proto.CompactTextString(m) }
+func (*FetchRequestBatch) ProtoMessage() {}
+
+type RequestBatch struct {
+ Batch []*Request `protobuf:"bytes,1,rep,name=batch" json:"batch,omitempty"`
+}
+
+func (m *RequestBatch) Reset() { *m = RequestBatch{} }
+func (m *RequestBatch) String() string { return proto.CompactTextString(m) }
+func (*RequestBatch) ProtoMessage() {}
+
+func (m *RequestBatch) GetBatch() []*Request {
+ if m != nil {
+ return m.Batch
+ }
+ return nil
+}
+
+type BatchMessage struct {
+ // Types that are valid to be assigned to Payload:
+ // *BatchMessage_Request
+ // *BatchMessage_RequestBatch
+ // *BatchMessage_PbftMessage
+ // *BatchMessage_Complaint
+ Payload isBatchMessage_Payload `protobuf_oneof:"payload"`
+}
+
+func (m *BatchMessage) Reset() { *m = BatchMessage{} }
+func (m *BatchMessage) String() string { return proto.CompactTextString(m) }
+func (*BatchMessage) ProtoMessage() {}
+
+type isBatchMessage_Payload interface {
+ isBatchMessage_Payload()
+}
+
+type BatchMessage_Request struct {
+ Request *Request `protobuf:"bytes,1,opt,name=request,oneof"`
+}
+type BatchMessage_RequestBatch struct {
+ RequestBatch *RequestBatch `protobuf:"bytes,2,opt,name=request_batch,oneof"`
+}
+type BatchMessage_PbftMessage struct {
+ PbftMessage []byte `protobuf:"bytes,3,opt,name=pbft_message,proto3,oneof"`
+}
+type BatchMessage_Complaint struct {
+ Complaint *Request `protobuf:"bytes,4,opt,name=complaint,oneof"`
+}
+
+func (*BatchMessage_Request) isBatchMessage_Payload() {}
+func (*BatchMessage_RequestBatch) isBatchMessage_Payload() {}
+func (*BatchMessage_PbftMessage) isBatchMessage_Payload() {}
+func (*BatchMessage_Complaint) isBatchMessage_Payload() {}
+
+func (m *BatchMessage) GetPayload() isBatchMessage_Payload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *BatchMessage) GetRequest() *Request {
+ if x, ok := m.GetPayload().(*BatchMessage_Request); ok {
+ return x.Request
+ }
+ return nil
+}
+
+func (m *BatchMessage) GetRequestBatch() *RequestBatch {
+ if x, ok := m.GetPayload().(*BatchMessage_RequestBatch); ok {
+ return x.RequestBatch
+ }
+ return nil
+}
+
+func (m *BatchMessage) GetPbftMessage() []byte {
+ if x, ok := m.GetPayload().(*BatchMessage_PbftMessage); ok {
+ return x.PbftMessage
+ }
+ return nil
+}
+
+func (m *BatchMessage) GetComplaint() *Request {
+ if x, ok := m.GetPayload().(*BatchMessage_Complaint); ok {
+ return x.Complaint
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*BatchMessage) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) {
+ return _BatchMessage_OneofMarshaler, _BatchMessage_OneofUnmarshaler, []interface{}{
+ (*BatchMessage_Request)(nil),
+ (*BatchMessage_RequestBatch)(nil),
+ (*BatchMessage_PbftMessage)(nil),
+ (*BatchMessage_Complaint)(nil),
+ }
+}
+
+func _BatchMessage_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*BatchMessage)
+ // payload
+ switch x := m.Payload.(type) {
+ case *BatchMessage_Request:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Request); err != nil {
+ return err
+ }
+ case *BatchMessage_RequestBatch:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.RequestBatch); err != nil {
+ return err
+ }
+ case *BatchMessage_PbftMessage:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.PbftMessage)
+ case *BatchMessage_Complaint:
+ b.EncodeVarint(4<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Complaint); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("BatchMessage.Payload has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _BatchMessage_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*BatchMessage)
+ switch tag {
+ case 1: // payload.request
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Request)
+ err := b.DecodeMessage(msg)
+ m.Payload = &BatchMessage_Request{msg}
+ return true, err
+ case 2: // payload.request_batch
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(RequestBatch)
+ err := b.DecodeMessage(msg)
+ m.Payload = &BatchMessage_RequestBatch{msg}
+ return true, err
+ case 3: // payload.pbft_message
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.Payload = &BatchMessage_PbftMessage{x}
+ return true, err
+ case 4: // payload.complaint
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Request)
+ err := b.DecodeMessage(msg)
+ m.Payload = &BatchMessage_Complaint{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+type Metadata struct {
+ SeqNo uint64 `protobuf:"varint,1,opt,name=seqNo" json:"seqNo,omitempty"`
+}
+
+func (m *Metadata) Reset() { *m = Metadata{} }
+func (m *Metadata) String() string { return proto.CompactTextString(m) }
+func (*Metadata) ProtoMessage() {}
diff --git a/consensus/pbft/messages.proto b/consensus/pbft/messages.proto
new file mode 100644
index 00000000000..c13097f5dae
--- /dev/null
+++ b/consensus/pbft/messages.proto
@@ -0,0 +1,147 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+syntax = "proto3";
+
+import "google/protobuf/timestamp.proto";
+
+package pbft;
+
+/*
+ * mapping to PBFT paper names
+ *
+ * PBFT name: local name
+ *
+ * o: transaction
+ * t: timestamp
+ * c: client
+ * v: view
+ * n: sequenceNumber
+ * D(m): requestDigest
+ * i: replicaId
+ */
+
+message message {
+ oneof payload {
+ request_batch request_batch = 1;
+ pre_prepare pre_prepare = 2;
+ prepare prepare = 3;
+ commit commit = 4;
+ checkpoint checkpoint = 5;
+ view_change view_change = 6;
+ new_view new_view = 7;
+ fetch_request_batch fetch_request_batch = 8;
+ request_batch return_request_batch = 9;
+ }
+}
+
+message request {
+ google.protobuf.Timestamp timestamp = 1; // Generated at the client level. Ensures that client's requests are atomically ordered.
+ bytes payload = 2; // opaque payload
+ uint64 replica_id = 3;
+ bytes signature = 4;
+}
+
+message pre_prepare {
+ uint64 view = 1;
+ uint64 sequence_number = 2;
+ string batch_digest = 3;
+ request_batch request_batch = 4;
+ uint64 replica_id = 5;
+}
+
+message prepare {
+ uint64 view = 1;
+ uint64 sequence_number = 2;
+ string batch_digest = 3;
+ uint64 replica_id = 4;
+}
+
+message commit {
+ uint64 view = 1;
+ uint64 sequence_number = 2;
+ string batch_digest = 3;
+ uint64 replica_id = 4;
+}
+
+message block_info {
+ uint64 block_number = 1;
+ bytes block_hash = 2;
+}
+
+message checkpoint {
+ uint64 sequence_number = 1;
+ uint64 replica_id = 2;
+ string id = 3;
+}
+
+message view_change {
+ /* This message should go away and become a checkpoint once replica_id is removed */
+ message C {
+ uint64 sequence_number = 1;
+ string id = 3;
+ }
+ message PQ {
+ uint64 sequence_number = 1;
+ string batch_digest = 2;
+ uint64 view = 3;
+ }
+
+ uint64 view = 1;
+ uint64 h = 2;
+ repeated C cset = 3;
+ repeated PQ pset = 4;
+ repeated PQ qset = 5;
+ uint64 replica_id = 6;
+ bytes signature = 7;
+}
+
+message PQset {
+ repeated view_change.PQ set = 1;
+}
+
+message new_view {
+    uint64 view = 1;
+    repeated view_change vset = 2;
+    // sequence number -> batch digest.  proto3 requires explicit
+    // key/value types for map fields; bare `map` does not compile.
+    map<uint64, string> xset = 3;
+    uint64 replica_id = 4;
+}
+
+message fetch_request_batch {
+ string batch_digest = 1;
+ uint64 replica_id = 2;
+}
+
+// batch
+
+message request_batch {
+ repeated request batch = 1;
+};
+
+message batch_message {
+ oneof payload {
+ request request = 1;
+ request_batch request_batch = 2;
+ bytes pbft_message = 3;
+ request complaint = 4; // like request, but processed everywhere
+ }
+}
+
+// consensus metadata
+
+message metadata {
+ uint64 seqNo = 1;
+}
diff --git a/consensus/pbft/mock_consumer_test.go b/consensus/pbft/mock_consumer_test.go
new file mode 100644
index 00000000000..db559fdaa17
--- /dev/null
+++ b/consensus/pbft/mock_consumer_test.go
@@ -0,0 +1,155 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "math/rand"
+ "time"
+
+ "github.com/hyperledger/fabric/consensus"
+ "github.com/hyperledger/fabric/consensus/util/events"
+ pb "github.com/hyperledger/fabric/protos"
+
+ "github.com/spf13/viper"
+)
+
+type consumerEndpoint struct {
+ *testEndpoint
+ consumer pbftConsumer
+ execTxResult func([]*pb.Transaction) ([]byte, error)
+}
+
+// stop shuts down this endpoint's consumer, releasing its resources.
+func (ce *consumerEndpoint) stop() {
+	ce.consumer.Close()
+}
+
+// isBusy reports whether the endpoint still has work in flight: an
+// active pbft timer, a state transfer, a currently executing request,
+// a non-idle consumer, or a full event-manager queue.
+func (ce *consumerEndpoint) isBusy() bool {
+	pbft := ce.consumer.getPBFTCore()
+	if pbft.timerActive || pbft.skipInProgress || pbft.currentExec != nil {
+		ce.net.debugMsg("Reporting busy because of timer (%v) or skipInProgress (%v) or currentExec (%v)\n", pbft.timerActive, pbft.skipInProgress, pbft.currentExec)
+		return true
+	}
+
+	select {
+	case <-ce.consumer.idleChannel():
+	default:
+		ce.net.debugMsg("Reporting busy because consumer not idle\n")
+		return true
+	}
+
+	// Probe the event manager: if the queue cannot accept a nil event
+	// immediately, pbft still has events pending.  (The busy log line
+	// belongs here, in the branch that actually reports busy; it was
+	// previously emitted on the successful send, i.e. when idle.)
+	select {
+	case ce.consumer.getManager().Queue() <- nil:
+	default:
+		ce.net.debugMsg("Reporting busy because pbft not idle\n")
+		return true
+	}
+
+	return false
+}
+
+// deliver hands a raw consensus payload from senderHandle to the
+// consumer, wrapped in a CONSENSUS-typed peer message envelope.
+func (ce *consumerEndpoint) deliver(msg []byte, senderHandle *pb.PeerID) {
+	ce.consumer.RecvMsg(&pb.Message{Type: pb.Message_CONSENSUS, Payload: msg}, senderHandle)
+}
+
+// completeStack assembles everything a pbft consumer expects of its
+// stack: network endpoint, security, ledger, and persistence mocks.
+type completeStack struct {
+	*consumerEndpoint
+	*noopSecurity
+	*MockLedger
+	mockPersist
+	skipTarget chan struct{} // capacity 1; acts as a try-lock around simulated state transfer
+}
+
+// MaxStateTransferTime bounds the simulated state-transfer delay (ms).
+const MaxStateTransferTime int = 200
+
+// No-op lifecycle/validation hooks; the mock stack needs no real work here.
+func (cs *completeStack) ValidateState() {}
+func (cs *completeStack) InvalidateState() {}
+func (cs *completeStack) Start() {}
+func (cs *completeStack) Halt() {}
+
+// UpdateState simulates asynchronous state transfer to target.  The
+// buffered skipTarget channel is used as a try-lock: only the first
+// caller starts a transfer, and later calls are ignored until it
+// finishes and releases the slot.
+func (cs *completeStack) UpdateState(tag interface{}, target *pb.BlockchainInfo, peers []*pb.PeerID) {
+	select {
+	// This guarantees the first SkipTo call is the one that's queued, whereas a mutex can be raced for
+	case cs.skipTarget <- struct{}{}:
+		go func() {
+			// State transfer takes time, not simulating this hides bugs
+			time.Sleep(time.Duration((MaxStateTransferTime/2)+rand.Intn(MaxStateTransferTime/2)) * time.Millisecond)
+			cs.simulateStateTransfer(target, peers)
+			cs.consumer.StateUpdated(tag, cs.GetBlockchainInfo())
+			<-cs.skipTarget // Basically like releasing a mutex
+		}()
+	default:
+		cs.net.debugMsg("Ignoring skipTo because one is already in progress\n")
+	}
+}
+
+// pbftConsumer is the contract the test network requires of a pbft
+// consumer implementation: the consenter interface plus test hooks
+// for inspecting internal state.
+type pbftConsumer interface {
+	innerStack
+	consensus.Consenter
+	getPBFTCore() *pbftCore
+	getManager() events.Manager // TODO, remove, this is a temporary measure
+	Close()
+	idleChannel() <-chan struct{}
+}
+
+// consumerNetwork pairs the simulated test network with the per-peer
+// mock ledgers backing it.
+type consumerNetwork struct {
+	*testnet
+	mockLedgers []*MockLedger
+}
+
+// GetLedgerByPeerID resolves a peer's mock ledger via its validator
+// ID; the boolean result is false when the peer ID cannot be parsed.
+func (cnet *consumerNetwork) GetLedgerByPeerID(peerID *pb.PeerID) (consensus.ReadOnlyLedger, bool) {
+	id, err := getValidatorID(peerID)
+	if err != nil {
+		return nil, false
+	}
+	return cnet.mockLedgers[id], true
+}
+
+// makeConsumerNetwork builds a simulated N-replica network.  Each
+// endpoint receives its own mock ledger and a consumer built by
+// makeConsumer over a complete mock stack; the optional initFNs run
+// against each endpoint before the network is assembled.
+func makeConsumerNetwork(N int, makeConsumer func(id uint64, config *viper.Viper, stack consensus.Stack) pbftConsumer, initFNs ...func(*consumerEndpoint)) *consumerNetwork {
+	twl := consumerNetwork{mockLedgers: make([]*MockLedger, N)}
+
+	endpointFunc := func(id uint64, net *testnet) endpoint {
+		tep := makeTestEndpoint(id, net)
+		ce := &consumerEndpoint{
+			testEndpoint: tep,
+		}
+
+		ml := NewMockLedger(&twl)
+		ml.ce = ce
+		twl.mockLedgers[id] = ml
+
+		cs := &completeStack{
+			consumerEndpoint: ce,
+			noopSecurity: &noopSecurity{},
+			MockLedger: ml,
+			skipTarget: make(chan struct{}, 1),
+		}
+
+		ce.consumer = makeConsumer(id, loadConfig(), cs)
+		// Override network size and fault tolerance on the core
+		// directly; f = (N-1)/3 is the standard BFT bound.
+		ce.consumer.getPBFTCore().N = N
+		ce.consumer.getPBFTCore().f = (N - 1) / 3
+
+		for _, fn := range initFNs {
+			fn(ce)
+		}
+
+		return ce
+	}
+
+	twl.testnet = makeTestnet(N, endpointFunc)
+	return &twl
+}
diff --git a/consensus/pbft/mock_ledger_test.go b/consensus/pbft/mock_ledger_test.go
new file mode 100644
index 00000000000..14b75750f2c
--- /dev/null
+++ b/consensus/pbft/mock_ledger_test.go
@@ -0,0 +1,332 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/hyperledger/fabric/consensus"
+ "github.com/hyperledger/fabric/protos"
+)
+
+// LedgerDirectory resolves a peer handle to that peer's read-only ledger.
+type LedgerDirectory interface {
+	GetLedgerByPeerID(peerID *protos.PeerID) (consensus.ReadOnlyLedger, bool)
+}
+
+// HashLedgerDirectory is a map-backed LedgerDirectory keyed by peer identity.
+type HashLedgerDirectory struct {
+	remoteLedgers map[protos.PeerID]consensus.ReadOnlyLedger
+}
+
+// GetLedgerByPeerID looks up the ledger registered for peerID; the boolean
+// mirrors the success of the map lookup.
+func (hd *HashLedgerDirectory) GetLedgerByPeerID(peerID *protos.PeerID) (consensus.ReadOnlyLedger, bool) {
+	l, found := hd.remoteLedgers[*peerID]
+	return l, found
+}
+
+// GetPeers wraps the full endpoint list in a PeersMessage.
+func (hd *HashLedgerDirectory) GetPeers() (*protos.PeersMessage, error) {
+	_, endpoints, err := hd.GetNetworkInfo()
+	msg := &protos.PeersMessage{Peers: endpoints}
+	return msg, err
+}
+
+// GetPeerEndpoint returns this directory's own endpoint descriptor.
+func (hd *HashLedgerDirectory) GetPeerEndpoint() (*protos.PeerEndpoint, error) {
+	me, _, err := hd.GetNetworkInfo()
+	return me, err
+}
+
+// GetNetworkInfo returns endpoints for all remote ledgers plus a synthetic
+// "SelfID" endpoint in the last slot, which is also returned as self.
+// Note: map iteration order is random, so the network slice order varies.
+func (hd *HashLedgerDirectory) GetNetworkInfo() (self *protos.PeerEndpoint, network []*protos.PeerEndpoint, err error) {
+	network = make([]*protos.PeerEndpoint, len(hd.remoteLedgers)+1)
+	i := 0
+	for peerID := range hd.remoteLedgers {
+		peerID := peerID // Get a memory address which will not be overwritten
+		network[i] = &protos.PeerEndpoint{
+			ID:   &peerID,
+			Type: protos.PeerEndpoint_VALIDATOR,
+		}
+		i++
+	}
+	// The final slot identifies ourselves.
+	network[i] = &protos.PeerEndpoint{
+		ID: &protos.PeerID{
+			Name: "SelfID",
+		},
+		Type: protos.PeerEndpoint_VALIDATOR,
+	}
+
+	self = network[i]
+	return
+}
+
+// GetNetworkHandles projects GetNetworkInfo down to bare peer IDs.
+func (hd *HashLedgerDirectory) GetNetworkHandles() (self *protos.PeerID, network []*protos.PeerID, err error) {
+	selfEP, endpoints, err := hd.GetNetworkInfo()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	network = make([]*protos.PeerID, 0, len(endpoints))
+	for _, ep := range endpoints {
+		network = append(network, ep.ID)
+	}
+	return selfEP.ID, network, nil
+}
+
+// MockLedger is an in-memory ledger used by the PBFT tests. It stores blocks
+// in a map keyed by block number and tracks an open transaction batch.
+type MockLedger struct {
+	cleanML       *MockLedger
+	blocks        map[uint64]*protos.Block // block number -> block
+	blockHeight   uint64                   // number of blocks, i.e. next block number
+	remoteLedgers LedgerDirectory          // used to pull blocks during state transfer
+
+	// Guards blocks/blockHeight in the read accessors; not every writer
+	// takes it (test-only code).
+	mutex *sync.Mutex
+
+	txID          interface{}            // id of the currently open batch, nil if none
+	curBatch      []*protos.Transaction  // transactions accumulated in the open batch
+	curResults    []byte                 // concatenated execution results of the open batch
+	preBatchState uint64
+
+	ce *consumerEndpoint // To support the ExecTx stuff
+}
+
+// NewMockLedger builds a ledger pre-seeded with an empty genesis block at
+// index 0 (so the initial height is 1), wired to the given ledger directory.
+func NewMockLedger(remoteLedgers LedgerDirectory) *MockLedger {
+	return &MockLedger{
+		mutex:         &sync.Mutex{},
+		blocks:        map[uint64]*protos.Block{0: {}},
+		blockHeight:   1,
+		remoteLedgers: remoteLedgers,
+	}
+}
+
+// BeginTxBatch opens a new transaction batch tagged with id; it fails if a
+// batch is already open.
+func (mock *MockLedger) BeginTxBatch(id interface{}) error {
+	if nil != mock.txID {
+		return fmt.Errorf("Tx batch is already active")
+	}
+	mock.txID, mock.curBatch, mock.curResults = id, nil, nil
+	return nil
+}
+
+// Execute asynchronously executes txs (opening a batch first if none is
+// open, using the ledger itself as the batch id) and then notifies the
+// consumer. Any execution error panics, which fails the test loudly.
+// NOTE(review): txID is read/written from this goroutine without the mutex.
+func (mock *MockLedger) Execute(tag interface{}, txs []*protos.Transaction) {
+	go func() {
+		if mock.txID == nil {
+			mock.BeginTxBatch(mock)
+		}
+
+		_, err := mock.ExecTxs(mock, txs)
+		if err != nil {
+			panic(err)
+		}
+		mock.ce.consumer.Executed(tag)
+	}()
+}
+
+// Commit asynchronously commits the open batch (using the ledger itself as
+// the batch id) and notifies the consumer with the new chain info. A commit
+// error panics to fail the test loudly.
+func (mock *MockLedger) Commit(tag interface{}, meta []byte) {
+	go func() {
+		_, err := mock.CommitTxBatch(mock, meta)
+		if err != nil {
+			panic(err)
+		}
+		mock.ce.consumer.Committed(tag, mock.GetBlockchainInfo())
+	}()
+}
+
+// Rollback asynchronously discards the open batch and notifies the consumer.
+// The RollbackTxBatch error is deliberately ignored (best-effort in tests).
+func (mock *MockLedger) Rollback(tag interface{}) {
+	go func() {
+		mock.RollbackTxBatch(mock)
+		mock.ce.consumer.RolledBack(tag)
+	}()
+}
+
+// ExecTxs appends txs to the open batch and produces a fake execution
+// result. If the test endpoint installed an execTxResult hook, it is used;
+// otherwise each transaction's payload (defaulting to "DUMMY", and note the
+// input transactions are mutated to that default) is concatenated.
+// The result is also appended to curResults, which later becomes the
+// committed block's StateHash.
+func (mock *MockLedger) ExecTxs(id interface{}, txs []*protos.Transaction) ([]byte, error) {
+	if !reflect.DeepEqual(mock.txID, id) {
+		return nil, fmt.Errorf("Invalid batch ID")
+	}
+
+	mock.curBatch = append(mock.curBatch, txs...)
+	var err error
+	var txResult []byte
+	if nil != mock.ce && nil != mock.ce.execTxResult {
+		txResult, err = mock.ce.execTxResult(txs)
+	} else {
+		// This is basically a default fake default transaction execution
+		if nil == txs {
+			txs = []*protos.Transaction{{Payload: []byte("DUMMY")}}
+		}
+
+		for _, transaction := range txs {
+			if transaction.Payload == nil {
+				transaction.Payload = []byte("DUMMY")
+			}
+
+			txResult = append(txResult, transaction.Payload...)
+		}
+
+	}
+
+	mock.curResults = append(mock.curResults, txResult...)
+
+	return txResult, err
+}
+
+// CommitTxBatch appends the open batch as a new block and clears the batch
+// state on success.
+func (mock *MockLedger) CommitTxBatch(id interface{}, metadata []byte) (*protos.Block, error) {
+	block, err := mock.commonCommitTx(id, metadata, false)
+	if err != nil {
+		return block, err
+	}
+	mock.txID = nil
+	mock.curBatch = nil
+	mock.curResults = nil
+	return block, nil
+}
+
+// commonCommitTx assembles a block from the open batch, chaining it to the
+// current head via PreviousBlockHash and using the accumulated execution
+// results as the StateHash. With preview=true the block is only returned;
+// otherwise it is inserted and the height advances.
+func (mock *MockLedger) commonCommitTx(id interface{}, metadata []byte, preview bool) (*protos.Block, error) {
+	if !reflect.DeepEqual(mock.txID, id) {
+		return nil, fmt.Errorf("Invalid batch ID")
+	}
+
+	previousBlockHash := []byte("Genesis")
+	if 0 < mock.blockHeight {
+		previousBlock, _ := mock.GetBlock(mock.blockHeight - 1)
+		previousBlockHash, _ = mock.HashBlock(previousBlock)
+	}
+
+	block := &protos.Block{
+		ConsensusMetadata: metadata,
+		PreviousBlockHash: previousBlockHash,
+		StateHash:         mock.curResults, // Use the current result output in the hash
+		Transactions:      mock.curBatch,
+		NonHashData:       &protos.NonHashData{},
+	}
+
+	if !preview {
+		hash, _ := mock.HashBlock(block)
+		fmt.Printf("TEST LEDGER: Mock ledger is inserting block %d with hash %x\n", mock.blockHeight, hash)
+		mock.blocks[mock.blockHeight] = block
+		mock.blockHeight++
+	}
+
+	return block, nil
+}
+
+// PreviewCommitTxBatch returns the marshaled blockchain info that committing
+// the open batch would produce, without mutating the ledger.
+func (mock *MockLedger) PreviewCommitTxBatch(id interface{}, metadata []byte) ([]byte, error) {
+	block, err := mock.commonCommitTx(id, metadata, true)
+	if nil != err {
+		return nil, err
+	}
+	// Height would advance by one if this batch were actually committed.
+	return mock.getBlockInfoBlob(mock.blockHeight+1, block), nil
+}
+
+// RollbackTxBatch discards the open batch identified by id.
+func (mock *MockLedger) RollbackTxBatch(id interface{}) error {
+	if !reflect.DeepEqual(mock.txID, id) {
+		return fmt.Errorf("Invalid batch ID")
+	}
+	mock.txID, mock.curBatch, mock.curResults = nil, nil, nil
+	return nil
+}
+
+// GetBlockchainSize returns the current chain height under the ledger mutex.
+func (mock *MockLedger) GetBlockchainSize() uint64 {
+	mock.mutex.Lock()
+	// Defer the unlock directly; wrapping it in an anonymous closure (as the
+	// original did) adds an allocation and no behavior.
+	defer mock.mutex.Unlock()
+	return mock.blockHeight
+}
+
+// GetBlock returns the block stored at index id, or an error if absent,
+// under the ledger mutex.
+func (mock *MockLedger) GetBlock(id uint64) (*protos.Block, error) {
+	mock.mutex.Lock()
+	// Defer the unlock directly; the original wrapped it in a needless closure.
+	defer mock.mutex.Unlock()
+	block, ok := mock.blocks[id]
+	if !ok {
+		return nil, fmt.Errorf("Block not found")
+	}
+	return block, nil
+}
+
+// HashBlock delegates to the block's own hash computation.
+func (mock *MockLedger) HashBlock(block *protos.Block) ([]byte, error) {
+	return block.GetHash()
+}
+
+// GetBlockchainInfo reports the current height and head-block hash.
+func (mock *MockLedger) GetBlockchainInfo() *protos.BlockchainInfo {
+	head, _ := mock.GetBlock(mock.blockHeight - 1)
+	return mock.getBlockInfo(mock.blockHeight, head)
+}
+
+// GetBlockchainInfoBlob is GetBlockchainInfo marshaled to bytes.
+func (mock *MockLedger) GetBlockchainInfoBlob() []byte {
+	head, _ := mock.GetBlock(mock.blockHeight - 1)
+	return mock.getBlockInfoBlob(mock.blockHeight, head)
+}
+
+// getBlockInfoBlob marshals the blockchain info for (height, block);
+// marshal errors are ignored, matching test-only usage.
+func (mock *MockLedger) getBlockInfoBlob(height uint64, block *protos.Block) []byte {
+	blob, _ := proto.Marshal(mock.getBlockInfo(height, block))
+	return blob
+}
+
+// getBlockInfo packages a height and the hash of block into BlockchainInfo.
+func (mock *MockLedger) getBlockInfo(height uint64, block *protos.Block) *protos.BlockchainInfo {
+	hash, _ := mock.HashBlock(block)
+	return &protos.BlockchainInfo{Height: height, CurrentBlockHash: hash}
+}
+
+// GetBlockHeadMetadata returns the consensus metadata of the head block.
+func (mock *MockLedger) GetBlockHeadMetadata() ([]byte, error) {
+	head, present := mock.blocks[mock.blockHeight-1]
+	if !present {
+		return nil, fmt.Errorf("could not retrieve block from mock ledger")
+	}
+	return head.ConsensusMetadata, nil
+}
+
+// simulateStateTransfer copies blocks [blockHeight, info.Height) from the
+// first listed peer's ledger and jumps this ledger's height to info.Height.
+// Each missing block is retried up to 10 times with a 100ms pause (the n--
+// before continue cancels the loop's n++, re-reading the same index).
+// NOTE(review): the >= guard also rejects skipping to our exact current
+// height, though the panic message only mentions "lower"; and blocks is
+// written here without taking mock.mutex — confirm callers serialize this.
+func (mock *MockLedger) simulateStateTransfer(info *protos.BlockchainInfo, peers []*protos.PeerID) {
+	var remoteLedger consensus.ReadOnlyLedger
+	if len(peers) > 0 {
+		var ok bool
+		remoteLedger, ok = mock.remoteLedgers.GetLedgerByPeerID(peers[0])
+		if !ok {
+			panic("Asked for results from a peer which does not exist")
+		}
+	} else {
+		panic("TODO, support state transfer from nil peers")
+	}
+	fmt.Printf("TEST LEDGER skipping to %+v", info)
+	p := 0
+	if mock.blockHeight >= info.Height {
+		panic(fmt.Sprintf("Asked to skip to a block (%d) which is lower than our current height of %d", info.Height, mock.blockHeight))
+	}
+	for n := mock.blockHeight; n < info.Height; n++ {
+		block, err := remoteLedger.GetBlock(n)
+
+		if nil != err {
+			n--
+			fmt.Printf("TEST LEDGER: Block not ready yet")
+			time.Sleep(100 * time.Millisecond)
+			p++
+			if p > 10 {
+				panic("Tried to get a block 10 times, no luck")
+			}
+			continue
+		}
+
+		mock.blocks[n] = block
+	}
+	mock.blockHeight = info.Height
+}
diff --git a/consensus/pbft/mock_network_test.go b/consensus/pbft/mock_network_test.go
new file mode 100644
index 00000000000..35254b5787e
--- /dev/null
+++ b/consensus/pbft/mock_network_test.go
@@ -0,0 +1,303 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// endpoint is one addressable replica in the simulated network: it can be
+// stopped, receive a raw message from a sender, and report identity/busyness.
+type endpoint interface {
+	stop()
+	deliver([]byte, *pb.PeerID)
+	getHandle() *pb.PeerID
+	getID() uint64
+	isBusy() bool
+}
+
+// taggedMsg is an in-flight payload; dst == -1 means broadcast to all.
+type taggedMsg struct {
+	src int
+	dst int
+	msg []byte
+}
+
+// testnet simulates the message fabric between N endpoints. filterFn, when
+// set, can rewrite or drop (return nil) any src->dst payload.
+type testnet struct {
+	debug     bool
+	N         int
+	closed    chan struct{}
+	endpoints []endpoint
+	msgs      chan taggedMsg
+	filterFn  func(int, int, []byte) []byte
+}
+
+// testEndpoint is the base endpoint implementation tied to one testnet.
+type testEndpoint struct {
+	id  uint64
+	net *testnet
+}
+
+// makeTestEndpoint constructs an endpoint with the given id on net.
+func makeTestEndpoint(id uint64, net *testnet) *testEndpoint {
+	return &testEndpoint{id: id, net: net}
+}
+
+// getID returns this endpoint's numeric replica id.
+func (ep *testEndpoint) getID() uint64 {
+	return ep.id
+}
+
+// getHandle derives the canonical "vpN" peer handle for this endpoint.
+func (ep *testEndpoint) getHandle() *pb.PeerID {
+	name := fmt.Sprintf("vp%d", ep.id)
+	return &pb.PeerID{Name: name}
+}
+
+// GetNetworkInfo wraps the peer handles from GetNetworkHandles into
+// validator-typed endpoint descriptors; the handles error is ignored, so
+// err is always nil here (matching the original).
+func (ep *testEndpoint) GetNetworkInfo() (self *pb.PeerEndpoint, network []*pb.PeerEndpoint, err error) {
+	selfID, ids, _ := ep.GetNetworkHandles()
+	self = &pb.PeerEndpoint{ID: selfID, Type: pb.PeerEndpoint_VALIDATOR}
+
+	network = make([]*pb.PeerEndpoint, len(ids))
+	for i, handle := range ids {
+		network[i] = &pb.PeerEndpoint{ID: handle, Type: pb.PeerEndpoint_VALIDATOR}
+	}
+	return
+}
+
+// GetNetworkHandles lists the handles of every endpoint on the net, leaving
+// nil entries for peers not yet initialized. Errors only when this endpoint
+// has no network.
+func (ep *testEndpoint) GetNetworkHandles() (self *pb.PeerID, network []*pb.PeerID, err error) {
+	if nil == ep.net {
+		err = fmt.Errorf("Network not initialized")
+		return
+	}
+	self = ep.getHandle()
+	network = make([]*pb.PeerID, len(ep.net.endpoints))
+	for i, oep := range ep.net.endpoints {
+		if nil != oep {
+			// In case this is invoked before all endpoints are initialized, this emulates a real network as well
+			network[i] = oep.getHandle()
+		}
+	}
+	return
+}
+
+// Broadcast delivers to all endpoints. In contrast to the stack
+// Broadcast, this will also deliver back to the replica. We keep
+// this behavior, because it exposes subtle bugs in the
+// implementation.
+// The peerType argument is ignored by the test fabric.
+func (ep *testEndpoint) Broadcast(msg *pb.Message, peerType pb.PeerEndpoint_Type) error {
+	ep.net.broadcastFilter(ep, msg.Payload)
+	return nil
+}
+
+// Unicast queues msg for exactly one receiver, resolving the handle to a
+// validator index first.
+func (ep *testEndpoint) Unicast(msg *pb.Message, receiverHandle *pb.PeerID) error {
+	receiverID, err := getValidatorID(receiverHandle)
+	if nil != err {
+		return fmt.Errorf("Couldn't unicast message to %s: %v", receiverHandle.Name, err)
+	}
+	tm := taggedMsg{src: int(ep.id), dst: int(receiverID), msg: msg.Payload}
+	internalQueueMessage(ep.net.msgs, tm)
+	return nil
+}
+
+// internalQueueMessage enqueues tm, warning (then blocking) when the channel
+// is full — a full queue usually means the test's buffer is undersized.
+func internalQueueMessage(queue chan<- taggedMsg, tm taggedMsg) {
+	select {
+	case queue <- tm:
+	default:
+		fmt.Println("TEST NET: Message cannot be queued without blocking, consider increasing the queue size")
+		queue <- tm
+	}
+}
+
+// debugMsg prints a formatted trace line, but only when debugging is enabled.
+func (net *testnet) debugMsg(msg string, args ...interface{}) {
+	if !net.debug {
+		return
+	}
+	fmt.Printf(msg, args...)
+}
+
+// broadcastFilter queues ep's payload as a broadcast (dst -1), first running
+// it through filterFn (dst -1) if set; a nil filtered payload drops the
+// message. Sends after the network closed are ignored with a warning.
+func (net *testnet) broadcastFilter(ep *testEndpoint, payload []byte) {
+	select {
+	case <-net.closed:
+		fmt.Println("WARNING! Attempted to send a request to a closed network, ignoring")
+		return
+	default:
+	}
+	if net.filterFn != nil {
+		payload = net.filterFn(int(ep.id), -1, payload)
+		net.debugMsg("TEST: filtered message\n")
+	}
+	if payload != nil {
+		net.debugMsg("TEST: attempting to queue message %p\n", payload)
+		internalQueueMessage(net.msgs, taggedMsg{int(ep.id), -1, payload})
+		net.debugMsg("TEST: message queued successfully %p\n", payload)
+	} else {
+		net.debugMsg("TEST: suppressing message with payload %p\n", payload)
+	}
+}
+
+// deliverFilter routes one queued message. Broadcasts (dst -1) are delivered
+// to every endpoint except the sender, each in its own goroutine, and this
+// call blocks until all deliveries finish; unicasts go to one endpoint
+// inline. filterFn may rewrite or (by returning nil) drop each delivery.
+// NOTE(review): the unicast branch delivers msg.msg, not the filtered
+// payload — the filter can only drop, not rewrite, unicasts; confirm intent.
+func (net *testnet) deliverFilter(msg taggedMsg) {
+	net.debugMsg("TEST: deliver\n")
+	senderHandle := net.endpoints[msg.src].getHandle()
+	if msg.dst == -1 {
+		net.debugMsg("TEST: Sending broadcast %v\n", net.endpoints)
+		wg := &sync.WaitGroup{}
+		wg.Add(len(net.endpoints))
+		for id, ep := range net.endpoints {
+			net.debugMsg("TEST: Looping broadcast %d\n", ep.getID())
+			// Copy the loop variables before handing them to the goroutine.
+			lid := id
+			lep := ep
+			go func() {
+				defer wg.Done()
+				if msg.src == lid {
+					if net.debug {
+						net.debugMsg("TEST: Skipping local delivery %d %d\n", lid, msg.src)
+					}
+					// do not deliver to local replica
+					return
+				}
+				payload := msg.msg
+				net.debugMsg("TEST: Filtering %d\n", lid)
+				if net.filterFn != nil {
+					payload = net.filterFn(msg.src, lid, payload)
+				}
+				net.debugMsg("TEST: Delivering %d\n", lid)
+				if payload != nil {
+					net.debugMsg("TEST: Sending message %d\n", lid)
+					lep.deliver(payload, senderHandle)
+					net.debugMsg("TEST: Sent message %d\n", lid)
+				} else {
+					net.debugMsg("TEST: Message to %d was skipped\n", lid)
+				}
+			}()
+		}
+		wg.Wait()
+	} else {
+		payload := msg.msg
+		net.debugMsg("TEST: Filtering %d\n", msg.dst)
+		if net.filterFn != nil {
+			payload = net.filterFn(msg.src, msg.dst, payload)
+		}
+		if payload != nil {
+			net.debugMsg("TEST: Sending unicast\n")
+			net.endpoints[msg.dst].deliver(msg.msg, senderHandle)
+		}
+	}
+}
+
+// processMessageFromChannel delivers one message; it returns false when the
+// channel was closed, telling the caller to stop its loop.
+func (net *testnet) processMessageFromChannel(msg taggedMsg, ok bool) bool {
+	if ok {
+		net.debugMsg("TEST: new message, delivering\n")
+		net.deliverFilter(msg)
+		return true
+	}
+	net.debugMsg("TEST: message channel closed, exiting\n")
+	return false
+}
+
+// process pumps messages until the network is quiescent: no queued messages
+// and no busy endpoint. When the queue is empty it polls endpoint busyness,
+// waiting up to 100ms per round for more messages; one extra empty pass
+// (the retry flag) confirms quiescence before returning. Panics if the
+// network has not settled within 60 seconds — the usual sign of a hang.
+func (net *testnet) process() error {
+	retry := true
+	countdown := time.After(60 * time.Second)
+	for {
+		net.debugMsg("TEST: process looping\n")
+		select {
+		case msg, ok := <-net.msgs:
+			retry = true
+			net.debugMsg("TEST: processing message without testing for idle\n")
+			if !net.processMessageFromChannel(msg, ok) {
+				return nil
+			}
+		case <-net.closed:
+			return nil
+		case <-countdown:
+			panic("Test network took more than 60 seconds to resolve requests, this usually indicates a hang")
+		default:
+			if !retry {
+				return nil
+			}
+
+			var busy []int
+			for i, ep := range net.endpoints {
+				if ep.isBusy() {
+					busy = append(busy, i)
+				}
+			}
+			if len(busy) == 0 {
+				// Nothing busy and nothing queued; one more pass to be sure.
+				retry = false
+				continue
+			}
+
+			net.debugMsg("TEST: some replicas are busy, waiting: %v\n", busy)
+			select {
+			case msg, ok := <-net.msgs:
+				retry = true
+				if !net.processMessageFromChannel(msg, ok) {
+					return nil
+				}
+				continue
+			case <-time.After(100 * time.Millisecond):
+				continue
+			}
+		}
+	}
+}
+
+// processContinually pumps messages until the channel is closed or the
+// network is stopped; unlike process, it never checks for quiescence.
+func (net *testnet) processContinually() {
+	for {
+		select {
+		case msg, ok := <-net.msgs:
+			if !net.processMessageFromChannel(msg, ok) {
+				return
+			}
+		case <-net.closed:
+			return
+		}
+	}
+}
+
+// makeTestnet constructs an N-endpoint network with a buffered message
+// queue, invoking initFn to build each endpoint.
+func makeTestnet(N int, initFn func(id uint64, network *testnet) endpoint) *testnet {
+	net := &testnet{
+		msgs:      make(chan taggedMsg, 100),
+		closed:    make(chan struct{}),
+		endpoints: make([]endpoint, N),
+	}
+
+	for i := 0; i < N; i++ {
+		net.endpoints[i] = initFn(uint64(i), net)
+	}
+
+	return net
+}
+
+// clearMessages drains any queued messages without blocking.
+func (net *testnet) clearMessages() {
+	for {
+		select {
+		case <-net.msgs:
+			// discard and keep draining
+		default:
+			return
+		}
+	}
+}
+
+// stop closes the network, then stops every endpoint.
+func (net *testnet) stop() {
+	close(net.closed)
+	for _, endpointToStop := range net.endpoints {
+		endpointToStop.stop()
+	}
+}
diff --git a/consensus/pbft/mock_utilities_test.go b/consensus/pbft/mock_utilities_test.go
new file mode 100644
index 00000000000..21a793c5ac9
--- /dev/null
+++ b/consensus/pbft/mock_utilities_test.go
@@ -0,0 +1,616 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "fmt"
+ "math/rand"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/consensus/util/events"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+
+ gp "google/protobuf"
+
+ pb "github.com/hyperledger/fabric/protos"
+ "github.com/spf13/viper"
+)
+
+// inertTimer is an events.Timer that does nothing — used where tests need a
+// timer that must never fire.
+type inertTimer struct{}
+
+func (it *inertTimer) Halt()                                              {}
+func (it *inertTimer) Reset(duration time.Duration, event events.Event)   {}
+func (it *inertTimer) SoftReset(duration time.Duration, event events.Event) {}
+func (it *inertTimer) Stop()                                              {}
+
+// inertTimerFactory hands out inert timers.
+type inertTimerFactory struct{}
+
+func (it *inertTimerFactory) CreateTimer() events.Timer {
+	return &inertTimer{}
+}
+
+// noopSecurity trivially signs nothing and verifies everything.
+type noopSecurity struct{}
+
+func (ns *noopSecurity) Sign(msg []byte) ([]byte, error) {
+	return nil, nil
+}
+
+func (ns *noopSecurity) Verify(peerID *pb.PeerID, signature []byte, message []byte) error {
+	return nil
+}
+
+// mockPersist is an in-memory key/value store standing in for the peer's
+// persistence layer; the map is created lazily by initialize.
+type mockPersist struct {
+	store map[string][]byte
+}
+
+// initialize lazily allocates the backing map.
+func (p *mockPersist) initialize() {
+	if p.store == nil {
+		p.store = make(map[string][]byte)
+	}
+}
+
+// ReadState returns the value for key, or an error if absent.
+func (p *mockPersist) ReadState(key string) ([]byte, error) {
+	p.initialize()
+	if val, ok := p.store[key]; ok {
+		return val, nil
+	}
+	return nil, fmt.Errorf("cannot find key %s", key)
+}
+
+// ReadStateSet returns all entries whose key starts with prefix.
+// NOTE(review): unlike the other methods this errors on a nil store rather
+// than calling initialize — presumably deliberate, but confirm.
+func (p *mockPersist) ReadStateSet(prefix string) (map[string][]byte, error) {
+	if p.store == nil {
+		return nil, fmt.Errorf("no state yet")
+	}
+	ret := make(map[string][]byte)
+	for k, v := range p.store {
+		// Manual prefix match (equivalent to strings.HasPrefix).
+		if len(k) >= len(prefix) && k[0:len(prefix)] == prefix {
+			ret[k] = v
+		}
+	}
+	return ret, nil
+}
+
+// StoreState writes value under key; never fails.
+func (p *mockPersist) StoreState(key string, value []byte) error {
+	p.initialize()
+	p.store[key] = value
+	return nil
+}
+
+// DelState removes key if present.
+func (p *mockPersist) DelState(key string) {
+	p.initialize()
+	delete(p.store, key)
+}
+
+// createRunningPbftWithManager wires a pbftCore to a started event manager
+// and returns both. The supplied config is now honored (the original
+// accepted it but always called loadConfig() instead); a nil config still
+// falls back to the default for backward compatibility.
+func createRunningPbftWithManager(id uint64, config *viper.Viper, stack innerStack) (*pbftCore, events.Manager) {
+	if config == nil {
+		config = loadConfig()
+	}
+	manager := events.NewManagerImpl()
+	core := newPbftCore(id, config, stack, events.NewTimerFactoryImpl(manager))
+	manager.SetReceiver(core)
+	manager.Start()
+	return core, manager
+}
+
+// createTx builds a deploy transaction whose timestamp and payload both
+// encode tag, making transactions distinguishable in assertions.
+func createTx(tag int64) (tx *pb.Transaction) {
+	return &pb.Transaction{
+		Type:      pb.Transaction_CHAINCODE_DEPLOY,
+		Timestamp: &gp.Timestamp{Seconds: tag, Nanos: 0},
+		Payload:   []byte(fmt.Sprint(tag)),
+	}
+}
+
+// marshalTx serializes tx, ignoring marshal errors (test-only helper).
+func marshalTx(tx *pb.Transaction) (txPacked []byte) {
+	packed, _ := proto.Marshal(tx)
+	return packed
+}
+
+// createTxMsg wraps a tagged transaction in a CHAIN_TRANSACTION message.
+func createTxMsg(tag int64) (msg *pb.Message) {
+	payload := marshalTx(createTx(tag))
+	return &pb.Message{
+		Type:    pb.Message_CHAIN_TRANSACTION,
+		Payload: payload,
+	}
+}
+
+// createPbftReq builds a PBFT Request carrying a tagged transaction,
+// attributed to the given replica.
+func createPbftReq(tag int64, replica uint64) (req *Request) {
+	tx := createTx(tag)
+	return &Request{
+		Timestamp: tx.GetTimestamp(),
+		ReplicaId: replica,
+		Payload:   marshalTx(tx),
+	}
+}
+
+// createPbftReqBatch wraps a single tagged request in a RequestBatch.
+func createPbftReqBatch(tag int64, replica uint64) (reqBatch *RequestBatch) {
+	return &RequestBatch{Batch: []*Request{createPbftReq(tag, replica)}}
+}
+
+// createPbftReqBatchMsg wraps a tagged request batch in a PBFT Message.
+func createPbftReqBatchMsg(tag int64, replica uint64) (msg *Message) {
+	batch := createPbftReqBatch(tag, replica)
+	return &Message{Payload: &Message_RequestBatch{RequestBatch: batch}}
+}
+
+// generateBroadcaster picks a uniformly random replica index in
+// [0, validatorCount) to act as the request broadcaster.
+func generateBroadcaster(validatorCount int) (requestBroadcaster int) {
+	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
+	return rng.Intn(validatorCount)
+}
+
+type omniProto struct {
+ // Stack methods
+ GetNetworkInfoImpl func() (self *pb.PeerEndpoint, network []*pb.PeerEndpoint, err error)
+ GetNetworkHandlesImpl func() (self *pb.PeerID, network []*pb.PeerID, err error)
+ BroadcastImpl func(msg *pb.Message, peerType pb.PeerEndpoint_Type) error
+ UnicastImpl func(msg *pb.Message, receiverHandle *pb.PeerID) error
+ SignImpl func(msg []byte) ([]byte, error)
+ VerifyImpl func(peerID *pb.PeerID, signature []byte, message []byte) error
+ GetBlockImpl func(id uint64) (block *pb.Block, err error)
+ GetCurrentStateHashImpl func() (stateHash []byte, err error)
+ GetBlockchainSizeImpl func() uint64
+ GetBlockHeadMetadataImpl func() ([]byte, error)
+ GetBlockchainInfoImpl func() *pb.BlockchainInfo
+ GetBlockchainInfoBlobImpl func() []byte
+ HashBlockImpl func(block *pb.Block) ([]byte, error)
+ VerifyBlockchainImpl func(start, finish uint64) (uint64, error)
+ PutBlockImpl func(blockNumber uint64, block *pb.Block) error
+ ApplyStateDeltaImpl func(id interface{}, delta *statemgmt.StateDelta) error
+ CommitStateDeltaImpl func(id interface{}) error
+ RollbackStateDeltaImpl func(id interface{}) error
+ EmptyStateImpl func() error
+ ExecuteImpl func(id interface{}, txs []*pb.Transaction)
+ CommitImpl func(id interface{}, meta []byte)
+ RollbackImpl func(id interface{})
+ UpdateStateImpl func(id interface{}, target *pb.BlockchainInfo, peers []*pb.PeerID)
+ BeginTxBatchImpl func(id interface{}) error
+ ExecTxsImpl func(id interface{}, txs []*pb.Transaction) ([]byte, error)
+ CommitTxBatchImpl func(id interface{}, metadata []byte) (*pb.Block, error)
+ RollbackTxBatchImpl func(id interface{}) error
+ PreviewCommitTxBatchImpl func(id interface{}, metadata []byte) ([]byte, error)
+ GetRemoteBlocksImpl func(replicaID *pb.PeerID, start, finish uint64) (<-chan *pb.SyncBlocks, error)
+ GetRemoteStateSnapshotImpl func(replicaID *pb.PeerID) (<-chan *pb.SyncStateSnapshot, error)
+ GetRemoteStateDeltasImpl func(replicaID *pb.PeerID, start, finish uint64) (<-chan *pb.SyncStateDeltas, error)
+ ReadStateImpl func(key string) ([]byte, error)
+ ReadStateSetImpl func(prefix string) (map[string][]byte, error)
+ StoreStateImpl func(key string, value []byte) error
+ DelStateImpl func(key string)
+ ValidateStateImpl func()
+ InvalidateStateImpl func()
+
+ // Inner Stack methods
+ broadcastImpl func(msgPayload []byte)
+ unicastImpl func(msgPayload []byte, receiverID uint64) (err error)
+ executeImpl func(seqNo uint64, reqBatch *RequestBatch)
+ getStateImpl func() []byte
+ skipToImpl func(seqNo uint64, snapshotID []byte, peers []uint64)
+ viewChangeImpl func(curView uint64)
+ signImpl func(msg []byte) ([]byte, error)
+ verifyImpl func(senderID uint64, signature []byte, message []byte) error
+ getLastSeqNoImpl func() (uint64, error)
+ validateStateImpl func()
+ invalidateStateImpl func()
+
+ // Closable Consenter methods
+ RecvMsgImpl func(ocMsg *pb.Message, senderHandle *pb.PeerID) error
+ CloseImpl func()
+ deliverImpl func([]byte, *pb.PeerID)
+
+ // Orderer methods
+ ValidateImpl func(seqNo uint64, id []byte) (commit bool, correctedID []byte, peerIDs []*pb.PeerID)
+ SkipToImpl func(seqNo uint64, id []byte, peers []*pb.PeerID)
+}
+
+func (op *omniProto) GetNetworkInfo() (self *pb.PeerEndpoint, network []*pb.PeerEndpoint, err error) {
+ if nil != op.GetNetworkInfoImpl {
+ return op.GetNetworkInfoImpl()
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) GetNetworkHandles() (self *pb.PeerID, network []*pb.PeerID, err error) {
+ if nil != op.GetNetworkHandlesImpl {
+ return op.GetNetworkHandlesImpl()
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) Broadcast(msg *pb.Message, peerType pb.PeerEndpoint_Type) error {
+ if nil != op.BroadcastImpl {
+ return op.BroadcastImpl(msg, peerType)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) Unicast(msg *pb.Message, receiverHandle *pb.PeerID) error {
+ if nil != op.UnicastImpl {
+ return op.UnicastImpl(msg, receiverHandle)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) Sign(msg []byte) ([]byte, error) {
+ if nil != op.SignImpl {
+ return op.SignImpl(msg)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) Verify(peerID *pb.PeerID, signature []byte, message []byte) error {
+ if nil != op.VerifyImpl {
+ return op.VerifyImpl(peerID, signature, message)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) GetBlock(id uint64) (block *pb.Block, err error) {
+ if nil != op.GetBlockImpl {
+ return op.GetBlockImpl(id)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) GetCurrentStateHash() (stateHash []byte, err error) {
+ if nil != op.GetCurrentStateHashImpl {
+ return op.GetCurrentStateHashImpl()
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) GetBlockchainSize() uint64 {
+ if nil != op.GetBlockchainSizeImpl {
+ return op.GetBlockchainSizeImpl()
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) GetBlockHeadMetadata() ([]byte, error) {
+ if nil != op.GetBlockHeadMetadataImpl {
+ return op.GetBlockHeadMetadataImpl()
+ }
+
+ return nil, nil
+}
+func (op *omniProto) GetBlockchainInfoBlob() []byte {
+ if nil != op.GetBlockchainInfoBlobImpl {
+ return op.GetBlockchainInfoBlobImpl()
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) GetBlockchainInfo() *pb.BlockchainInfo {
+ if nil != op.GetBlockchainInfoImpl {
+ return op.GetBlockchainInfoImpl()
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) HashBlock(block *pb.Block) ([]byte, error) {
+ if nil != op.HashBlockImpl {
+ return op.HashBlockImpl(block)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) VerifyBlockchain(start, finish uint64) (uint64, error) {
+ if nil != op.VerifyBlockchainImpl {
+ return op.VerifyBlockchainImpl(start, finish)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) PutBlock(blockNumber uint64, block *pb.Block) error {
+ if nil != op.PutBlockImpl {
+ return op.PutBlockImpl(blockNumber, block)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) ApplyStateDelta(id interface{}, delta *statemgmt.StateDelta) error {
+ if nil != op.ApplyStateDeltaImpl {
+ return op.ApplyStateDeltaImpl(id, delta)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) CommitStateDelta(id interface{}) error {
+ if nil != op.CommitStateDeltaImpl {
+ return op.CommitStateDeltaImpl(id)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) RollbackStateDelta(id interface{}) error {
+ if nil != op.RollbackStateDeltaImpl {
+ return op.RollbackStateDeltaImpl(id)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) EmptyState() error {
+ if nil != op.EmptyStateImpl {
+ return op.EmptyStateImpl()
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) BeginTxBatch(id interface{}) error {
+ if nil != op.BeginTxBatchImpl {
+ return op.BeginTxBatchImpl(id)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) ExecTxs(id interface{}, txs []*pb.Transaction) ([]byte, error) {
+ if nil != op.ExecTxsImpl {
+ return op.ExecTxsImpl(id, txs)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) CommitTxBatch(id interface{}, metadata []byte) (*pb.Block, error) {
+ if nil != op.CommitTxBatchImpl {
+ return op.CommitTxBatchImpl(id, metadata)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) RollbackTxBatch(id interface{}) error {
+ if nil != op.RollbackTxBatchImpl {
+ return op.RollbackTxBatchImpl(id)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) PreviewCommitTxBatch(id interface{}, metadata []byte) ([]byte, error) {
+ if nil != op.PreviewCommitTxBatchImpl {
+ return op.PreviewCommitTxBatchImpl(id, metadata)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) GetRemoteBlocks(replicaID *pb.PeerID, start, finish uint64) (<-chan *pb.SyncBlocks, error) {
+ if nil != op.GetRemoteBlocksImpl {
+ return op.GetRemoteBlocksImpl(replicaID, start, finish)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) GetRemoteStateSnapshot(replicaID *pb.PeerID) (<-chan *pb.SyncStateSnapshot, error) {
+ if nil != op.GetRemoteStateSnapshotImpl {
+ return op.GetRemoteStateSnapshotImpl(replicaID)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) GetRemoteStateDeltas(replicaID *pb.PeerID, start, finish uint64) (<-chan *pb.SyncStateDeltas, error) {
+ if nil != op.GetRemoteStateDeltasImpl {
+ return op.GetRemoteStateDeltasImpl(replicaID, start, finish)
+ }
+
+ panic("Unimplemented")
+}
+
+func (op *omniProto) broadcast(msgPayload []byte) {
+ if nil != op.broadcastImpl {
+ op.broadcastImpl(msgPayload)
+ return
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) unicast(msgPayload []byte, receiverID uint64) (err error) {
+ if nil != op.unicastImpl {
+ return op.unicastImpl(msgPayload, receiverID)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) execute(seqNo uint64, reqBatch *RequestBatch) {
+ if nil != op.executeImpl {
+ op.executeImpl(seqNo, reqBatch)
+ return
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) skipTo(seqNo uint64, snapshotID []byte, peers []uint64) {
+ if nil != op.skipToImpl {
+ op.skipToImpl(seqNo, snapshotID, peers)
+ return
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) viewChange(curView uint64) {
+ if nil != op.viewChangeImpl {
+ op.viewChangeImpl(curView)
+ return
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) sign(msg []byte) ([]byte, error) {
+ if nil != op.signImpl {
+ return op.signImpl(msg)
+ }
+
+ panic("Unimplemented")
+}
+func (op *omniProto) verify(senderID uint64, signature []byte, message []byte) error {
+ if nil != op.verifyImpl {
+ return op.verifyImpl(senderID, signature, message)
+ }
+
+ panic("Unimplemented")
+}
+
+func (op *omniProto) RecvMsg(ocMsg *pb.Message, senderHandle *pb.PeerID) error {
+ if nil != op.RecvMsgImpl {
+ return op.RecvMsgImpl(ocMsg, senderHandle)
+ }
+
+ panic("Unimplemented")
+}
+
+func (op *omniProto) getLastSeqNo() (uint64, error) {
+ if op.getLastSeqNoImpl != nil {
+ return op.getLastSeqNoImpl()
+ }
+
+ return 0, fmt.Errorf("getLastSeqNo is not implemented")
+}
+
+func (op *omniProto) Close() {
+ if nil != op.CloseImpl {
+ op.CloseImpl()
+ return
+ }
+
+ panic("Unimplemented")
+}
+
+func (op *omniProto) Validate(seqNo uint64, id []byte) (commit bool, correctedID []byte, peerIDs []*pb.PeerID) {
+ if nil != op.ValidateImpl {
+ return op.ValidateImpl(seqNo, id)
+ }
+
+ panic("Unimplemented")
+
+}
+
+func (op *omniProto) SkipTo(seqNo uint64, meta []byte, id []*pb.PeerID) {
+ if nil != op.SkipToImpl {
+ op.SkipToImpl(seqNo, meta, id)
+ return
+ }
+
+ panic("Unimplemented")
+}
+
+// deliver dispatches to the configured deliverImpl, panicking only when no
+// implementation was supplied. The original fell through to the panic even
+// after successfully invoking deliverImpl (missing return) — every other
+// delegate in omniProto returns after calling its impl.
+func (op *omniProto) deliver(msg []byte, target *pb.PeerID) {
+	if nil != op.deliverImpl {
+		op.deliverImpl(msg, target)
+		return
+	}
+	panic("Unimplemented")
+}
+
+func (op *omniProto) getState() []byte {
+ if nil != op.getStateImpl {
+ return op.getStateImpl()
+ }
+
+ panic("Unimplemented")
+}
+
+func (op *omniProto) ReadState(key string) ([]byte, error) {
+ if nil != op.ReadStateImpl {
+ return op.ReadStateImpl(key)
+ }
+ return nil, fmt.Errorf("unimplemented")
+}
+
+// ReadStateSet dispatches to the configured ReadStateSetImpl. The original
+// nil-checked ReadStateImpl (the wrong field) before calling
+// ReadStateSetImpl, which could dereference a nil func — or wrongly report
+// "unimplemented" — whenever only one of the two was set.
+func (op *omniProto) ReadStateSet(prefix string) (map[string][]byte, error) {
+	if nil != op.ReadStateSetImpl {
+		return op.ReadStateSetImpl(prefix)
+	}
+	return nil, fmt.Errorf("unimplemented")
+}
+
+func (op *omniProto) DelState(key string) {
+ if nil != op.DelStateImpl {
+ op.DelStateImpl(key)
+ }
+}
+
+func (op *omniProto) StoreState(key string, value []byte) error {
+ if nil != op.StoreStateImpl {
+ return op.StoreStateImpl(key, value)
+ }
+ return fmt.Errorf("unimplemented")
+}
+
+func (op *omniProto) ValidateState() {
+ if nil != op.ValidateStateImpl {
+ op.ValidateStateImpl()
+ return
+ }
+ panic("unimplemented")
+}
+
+func (op *omniProto) InvalidateState() {
+ if nil != op.InvalidateStateImpl {
+ op.InvalidateStateImpl()
+ return
+ }
+ panic("unimplemented")
+}
+
+func (op *omniProto) validateState() {
+ if nil != op.validateStateImpl {
+ op.validateStateImpl()
+ return
+ }
+ panic("unimplemented")
+}
+
+func (op *omniProto) invalidateState() {
+ if nil != op.invalidateStateImpl {
+ op.invalidateStateImpl()
+ return
+ }
+ panic("unimplemented")
+}
+func (op *omniProto) Commit(tag interface{}, meta []byte) {
+ if nil != op.CommitImpl {
+ op.CommitImpl(tag, meta)
+ return
+ }
+ panic("unimplemented")
+}
+func (op *omniProto) UpdateState(tag interface{}, target *pb.BlockchainInfo, peers []*pb.PeerID) {
+ if nil != op.UpdateStateImpl {
+ op.UpdateStateImpl(tag, target, peers)
+ return
+ }
+ panic("unimplemented")
+}
+func (op *omniProto) Rollback(tag interface{}) {
+ if nil != op.RollbackImpl {
+ op.RollbackImpl(tag)
+ return
+ }
+ panic("unimplemented")
+}
+func (op *omniProto) Execute(tag interface{}, txs []*pb.Transaction) {
+ if nil != op.ExecuteImpl {
+ op.ExecuteImpl(tag, txs)
+ return
+ }
+ panic("unimplemented")
+}
+
+// These methods are a temporary hack until the consensus API can be cleaned a little
+func (op *omniProto) Start() {}
+func (op *omniProto) Halt() {}
diff --git a/consensus/pbft/pbft-core.go b/consensus/pbft/pbft-core.go
new file mode 100644
index 00000000000..0a9405896ae
--- /dev/null
+++ b/consensus/pbft/pbft-core.go
@@ -0,0 +1,1346 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "encoding/base64"
+ "fmt"
+ "math/rand"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/hyperledger/fabric/consensus"
+ "github.com/hyperledger/fabric/consensus/util/events"
+ _ "github.com/hyperledger/fabric/core" // Needed for logging format init
+ "github.com/op/go-logging"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/spf13/viper"
+)
+
+// =============================================================================
+// init
+// =============================================================================
+
+var logger *logging.Logger // package-level logger
+
+func init() {
+	// Bind the package logger to the "consensus/pbft" namespace.
+	logger = logging.MustGetLogger("consensus/pbft")
+}
+
+const (
+	// UnreasonableTimeout is an ugly thing, we need to create timers, then stop them before they expire, so use a large timeout
+	// (timers armed with this value are treated as effectively infinite and are always stopped manually)
+	UnreasonableTimeout = 100 * time.Hour
+)
+
+// =============================================================================
+// custom interfaces and structure definitions
+// =============================================================================
+
+// Event Types
+
+// workEvent is a temporary type, to inject work
+type workEvent func()
+
+// viewChangeTimerEvent is sent when the view change timer expires
+type viewChangeTimerEvent struct{}
+
+// execDoneEvent is sent when an execution completes
+type execDoneEvent struct{}
+
+// pbftMessageEvent is sent when a consensus message is received to be sent to pbft
+type pbftMessageEvent pbftMessage
+
+// viewChangedEvent is sent when a view change completes (the prior comment
+// here duplicated viewChangeTimerEvent's wording; see the no-op handling
+// of this event in ProcessEvent)
+type viewChangedEvent struct{}
+
+// viewChangeResendTimerEvent is sent when the view change resend timer expires
+type viewChangeResendTimerEvent struct{}
+
+// returnRequestBatchEvent is sent by pbft when we are forwarded a request
+type returnRequestBatchEvent *RequestBatch
+
+// nullRequestEvent provides "keep-alive" null requests
+type nullRequestEvent struct{}
+
+// Unless otherwise noted, all methods consume the PBFT thread, and should therefore
+// not rely on PBFT accomplishing any work while that thread is being held
+//
+// innerStack is the interface pbftCore requires of its consumer: message
+// transport, execution, state transfer, authentication, and persistence.
+type innerStack interface {
+	broadcast(msgPayload []byte)
+	unicast(msgPayload []byte, receiverID uint64) (err error)
+	execute(seqNo uint64, reqBatch *RequestBatch) // This is invoked on a separate thread
+	getState() []byte
+	getLastSeqNo() (uint64, error)
+	// skipTo jumps local state to seqNo/snapshotID, fetching from peers
+	// (used by retryStateTransfer)
+	skipTo(seqNo uint64, snapshotID []byte, peers []uint64)
+
+	// sign/verify provide per-replica message authentication
+	sign(msg []byte) ([]byte, error)
+	verify(senderID uint64, signature []byte, message []byte) error
+
+	// invalidateState/validateState bracket periods when local state is
+	// known stale (see stateTransfer and the stateUpdatedEvent handling)
+	invalidateState()
+	validateState()
+
+	consensus.StatePersistor
+}
+
+// This structure is used for incoming PBFT bound messages
+type pbftMessage struct {
+	sender uint64 // replica ID of the peer the message arrived from
+	msg    *Message
+}
+
+// checkpointMessage pairs a checkpoint's sequence number with its state ID
+type checkpointMessage struct {
+	seqNo uint64
+	id    []byte
+}
+
+// stateUpdateTarget is a checkpoint plus the replicas believed able to serve it
+type stateUpdateTarget struct {
+	checkpointMessage
+	replicas []uint64
+}
+
+// pbftCore holds the full protocol state of one PBFT replica.
+// NOTE(review): access appears to be serialized on the PBFT event-loop
+// thread (see innerStack's comment); internalLock seems to guard the few
+// cross-thread interactions — confirm before relying on this.
+type pbftCore struct {
+	// internal data
+	internalLock sync.Mutex
+	executing    bool // signals that application is executing
+
+	idleChan   chan struct{} // Used to detect idleness for testing
+	injectChan chan func()   // Used as a hack to inject work onto the PBFT thread, to be removed eventually
+
+	consumer innerStack
+
+	// PBFT data
+	activeView    bool              // view change happening
+	byzantine     bool              // whether this node is intentionally acting as Byzantine; useful for debugging on the testnet
+	f             int               // max. number of faults we can tolerate
+	N             int               // max. number of validators in the network
+	h             uint64            // low watermark
+	id            uint64            // replica ID; PBFT `i`
+	K             uint64            // checkpoint period
+	logMultiplier uint64            // use this value to calculate log size : k*logMultiplier
+	L             uint64            // log size
+	lastExec      uint64            // last request we executed
+	replicaCount  int               // number of replicas; PBFT `|R|`
+	seqNo         uint64            // PBFT "n", strictly monotonic increasing sequence number
+	view          uint64            // current view
+	chkpts        map[uint64]string // state checkpoints; map lastExec to global hash
+	pset          map[uint64]*ViewChange_PQ // prepared entries carried across view changes
+	qset          map[qidx]*ViewChange_PQ   // pre-prepared entries carried across view changes
+
+	skipInProgress    bool               // Set when we have detected a fall behind scenario until we pick a new starting point
+	stateTransferring bool               // Set when state transfer is executing
+	highStateTarget   *stateUpdateTarget // Set to the highest weak checkpoint cert we have observed
+	hChkpts           map[uint64]uint64  // highest checkpoint sequence number observed for each replica
+
+	currentExec           *uint64                  // currently executing request
+	timerActive           bool                     // is the timer running?
+	vcResendTimer         events.Timer             // timer triggering resend of a view change
+	newViewTimer          events.Timer             // timeout triggering a view change
+	requestTimeout        time.Duration            // progress timeout for requests
+	vcResendTimeout       time.Duration            // timeout before resending view change
+	newViewTimeout        time.Duration            // progress timeout for new views
+	newViewTimerReason    string                   // what triggered the timer
+	lastNewViewTimeout    time.Duration            // last timeout we used during this view change
+	outstandingReqBatches map[string]*RequestBatch // track whether we are waiting for request batches to execute
+
+	nullRequestTimer   events.Timer  // timeout triggering a null request
+	nullRequestTimeout time.Duration // duration for this timeout
+	viewChangePeriod   uint64        // period between automatic view changes
+	viewChangeSeqNo    uint64        // next seqNo to perform view change
+
+	missingReqBatches map[string]bool // for all the assigned, non-checkpointed request batches we might be missing during view-change
+
+	// implementation of PBFT `in`
+	reqBatchStore   map[string]*RequestBatch // track request batches
+	certStore       map[msgID]*msgCert       // track quorum certificates for requests
+	checkpointStore map[Checkpoint]bool      // track checkpoints as set
+	viewChangeStore map[vcidx]*ViewChange    // track view-change messages
+	newViewStore    map[uint64]*NewView      // track last new-view we received or sent
+}
+
+// qidx indexes the Q-set by batch digest and sequence number
+type qidx struct {
+	d string // batch digest
+	n uint64 // sequence number
+}
+
+type msgID struct { // our index through certStore
+	v uint64 // view
+	n uint64 // sequence number
+}
+
+// msgCert accumulates the messages forming the quorum certificate for one
+// view/seqNo slot
+type msgCert struct {
+	digest      string
+	prePrepare  *PrePrepare
+	sentPrepare bool // whether this replica already broadcast its prepare
+	prepare     []*Prepare
+	sentCommit  bool // whether this replica already broadcast its commit
+	commit      []*Commit
+}
+
+// vcidx indexes view-change messages by view and originating replica
+type vcidx struct {
+	v  uint64
+	id uint64
+}
+
+// sortableUint64Slice implements sort.Interface for ascending uint64 order.
+type sortableUint64Slice []uint64
+
+// Len returns the number of elements in the slice.
+func (a sortableUint64Slice) Len() int { return len(a) }
+
+// Swap exchanges the elements at positions i and j.
+func (a sortableUint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// Less reports whether the element at i sorts before the element at j.
+func (a sortableUint64Slice) Less(i, j int) bool { return a[i] < a[j] }
+
+// =============================================================================
+// constructors
+// =============================================================================
+
+// newPbftCore constructs the PBFT state machine for replica id, reading
+// tuning parameters from config, delegating side effects to consumer, and
+// creating its timers through etf. It panics on invalid configuration
+// (N < 3f+1, logmultiplier < 2, or unparsable mandatory timeouts).
+func newPbftCore(id uint64, config *viper.Viper, consumer innerStack, etf events.TimerFactory) *pbftCore {
+	var err error
+	instance := &pbftCore{}
+	instance.id = id
+	instance.consumer = consumer
+
+	instance.newViewTimer = etf.CreateTimer()
+	instance.vcResendTimer = etf.CreateTimer()
+	instance.nullRequestTimer = etf.CreateTimer()
+
+	// BFT safety requires N >= 3f+1
+	instance.N = config.GetInt("general.N")
+	instance.f = config.GetInt("general.f")
+	if instance.f*3+1 > instance.N {
+		panic(fmt.Sprintf("need at least %d enough replicas to tolerate %d byzantine faults, but only %d replicas configured", instance.f*3+1, instance.f, instance.N))
+	}
+
+	instance.K = uint64(config.GetInt("general.K"))
+
+	instance.logMultiplier = uint64(config.GetInt("general.logmultiplier"))
+	if instance.logMultiplier < 2 {
+		panic("Log multiplier must be greater than or equal to 2")
+	}
+	instance.L = instance.logMultiplier * instance.K // log size
+	instance.viewChangePeriod = uint64(config.GetInt("general.viewchangeperiod"))
+
+	instance.byzantine = config.GetBool("general.byzantine")
+
+	instance.requestTimeout, err = time.ParseDuration(config.GetString("general.timeout.request"))
+	if err != nil {
+		panic(fmt.Errorf("Cannot parse request timeout: %s", err))
+	}
+	instance.vcResendTimeout, err = time.ParseDuration(config.GetString("general.timeout.resendviewchange"))
+	if err != nil {
+		// Fixed copy-paste: this is the resend-view-change timeout, not the request timeout
+		panic(fmt.Errorf("Cannot parse resend view change timeout: %s", err))
+	}
+	instance.newViewTimeout, err = time.ParseDuration(config.GetString("general.timeout.viewchange"))
+	if err != nil {
+		panic(fmt.Errorf("Cannot parse new view timeout: %s", err))
+	}
+	instance.nullRequestTimeout, err = time.ParseDuration(config.GetString("general.timeout.nullrequest"))
+	if err != nil {
+		// A missing or unparsable null-request timeout disables null requests
+		// rather than failing startup
+		instance.nullRequestTimeout = 0
+	}
+
+	instance.activeView = true
+	instance.replicaCount = instance.N
+
+	logger.Infof("PBFT type = %T", instance.consumer)
+	logger.Infof("PBFT Max number of validating peers (N) = %v", instance.N)
+	logger.Infof("PBFT Max number of failing peers (f) = %v", instance.f)
+	logger.Infof("PBFT byzantine flag = %v", instance.byzantine)
+	logger.Infof("PBFT request timeout = %v", instance.requestTimeout)
+	logger.Infof("PBFT view change timeout = %v", instance.newViewTimeout)
+	logger.Infof("PBFT Checkpoint period (K) = %v", instance.K)
+	logger.Infof("PBFT Log multiplier = %v", instance.logMultiplier)
+	logger.Infof("PBFT log size (L) = %v", instance.L)
+	if instance.nullRequestTimeout > 0 {
+		logger.Infof("PBFT null requests timeout = %v", instance.nullRequestTimeout)
+	} else {
+		logger.Infof("PBFT null requests disabled")
+	}
+	if instance.viewChangePeriod > 0 {
+		logger.Infof("PBFT view change period = %v", instance.viewChangePeriod)
+	} else {
+		logger.Infof("PBFT automatic view change disabled")
+	}
+
+	// init the logs
+	instance.certStore = make(map[msgID]*msgCert)
+	instance.reqBatchStore = make(map[string]*RequestBatch)
+	instance.checkpointStore = make(map[Checkpoint]bool)
+	instance.chkpts = make(map[uint64]string)
+	instance.viewChangeStore = make(map[vcidx]*ViewChange)
+	instance.pset = make(map[uint64]*ViewChange_PQ)
+	instance.qset = make(map[qidx]*ViewChange_PQ)
+	instance.newViewStore = make(map[uint64]*NewView)
+
+	// initialize state transfer
+	instance.hChkpts = make(map[uint64]uint64)
+
+	instance.chkpts[0] = "XXX GENESIS"
+
+	instance.lastNewViewTimeout = instance.newViewTimeout
+	instance.outstandingReqBatches = make(map[string]*RequestBatch)
+	instance.missingReqBatches = make(map[string]bool)
+
+	// Recover any previously persisted protocol state before going live
+	instance.restoreState()
+
+	instance.viewChangeSeqNo = ^uint64(0) // infinity
+	instance.updateViewChangeSeqNo()
+
+	return instance
+}
+
+// close tears down resources opened by newPbftCore
+func (instance *pbftCore) close() {
+	// Halt every timer created in newPbftCore; vcResendTimer was
+	// previously never halted here, leaking its timer.
+	instance.newViewTimer.Halt()
+	instance.vcResendTimer.Halt()
+	instance.nullRequestTimer.Halt()
+}
+
+// allow the view-change protocol to kick-off when the timer expires
+//
+// ProcessEvent is the central PBFT event dispatcher: every protocol
+// message, timer expiry, and execution-completion notice is routed through
+// here. It may return a follow-up event for the event loop to process
+// next, or nil when handling is complete.
+func (instance *pbftCore) ProcessEvent(e events.Event) events.Event {
+	var err error
+	logger.Debugf("Replica %d processing event", instance.id)
+	switch et := e.(type) {
+	case viewChangeTimerEvent:
+		logger.Infof("Replica %d view change timer expired, sending view change: %s", instance.id, instance.newViewTimerReason)
+		instance.timerActive = false
+		instance.sendViewChange()
+	case *pbftMessage:
+		// Unwrap the pointer and re-dispatch as a value event
+		return pbftMessageEvent(*et)
+	case pbftMessageEvent:
+		msg := et
+		logger.Debugf("Replica %d received incoming message from %v", instance.id, msg.sender)
+		// NOTE(review): `err` here shadows the outer err, so recvMsg
+		// failures break out without ever reaching the Warning log at the
+		// bottom of this function — confirm this silent drop is intended.
+		next, err := instance.recvMsg(msg.msg, msg.sender)
+		if err != nil {
+			break
+		}
+		return next
+	case *RequestBatch:
+		err = instance.recvRequestBatch(et)
+	case *PrePrepare:
+		err = instance.recvPrePrepare(et)
+	case *Prepare:
+		err = instance.recvPrepare(et)
+	case *Commit:
+		err = instance.recvCommit(et)
+	case *Checkpoint:
+		return instance.recvCheckpoint(et)
+	case *ViewChange:
+		return instance.recvViewChange(et)
+	case *NewView:
+		return instance.recvNewView(et)
+	case *FetchRequestBatch:
+		err = instance.recvFetchRequestBatch(et)
+	case returnRequestBatchEvent:
+		return instance.recvReturnRequestBatch(et)
+	case stateUpdatedEvent:
+		update := et.chkpt
+		instance.stateTransferring = false
+		// If state transfer did not complete successfully, or if it did not reach our low watermark, do it again
+		if et.target == nil || update.seqNo < instance.h {
+			if et.target == nil {
+				logger.Warningf("Replica %d attempted state transfer target was not reachable (%v)", instance.id, et.chkpt)
+			} else {
+				logger.Warningf("Replica %d recovered to seqNo %d but our low watermark has moved to %d", instance.id, update.seqNo, instance.h)
+			}
+			if instance.highStateTarget == nil {
+				logger.Debugf("Replica %d has no state targets, cannot resume state transfer yet", instance.id)
+			} else if update.seqNo < instance.highStateTarget.seqNo {
+				logger.Debugf("Replica %d has state target for %d, transferring", instance.id, instance.highStateTarget.seqNo)
+				instance.retryStateTransfer(nil)
+			} else {
+				logger.Debugf("Replica %d has no state target above %d, highest is %d", instance.id, update.seqNo, instance.highStateTarget.seqNo)
+			}
+			return nil
+		}
+		logger.Infof("Replica %d application caught up via state transfer, lastExec now %d", instance.id, update.seqNo)
+		// XXX create checkpoint
+		instance.lastExec = update.seqNo
+		instance.moveWatermarks(instance.lastExec) // The watermark movement handles moving this to a checkpoint boundary
+		instance.skipInProgress = false
+		instance.consumer.validateState()
+		instance.executeOutstanding()
+	case execDoneEvent:
+		instance.execDoneSync()
+		if instance.skipInProgress {
+			instance.retryStateTransfer(nil)
+		}
+		// We will delay new view processing sometimes
+		return instance.processNewView()
+	case nullRequestEvent:
+		instance.nullRequestHandler()
+	case workEvent:
+		et() // Used to allow the caller to steal use of the main thread, to be removed
+	case viewChangeQuorumEvent:
+		logger.Debugf("Replica %d received view change quorum, processing new view", instance.id)
+		if instance.primary(instance.view) == instance.id {
+			return instance.sendNewView()
+		}
+		return instance.processNewView()
+	case viewChangedEvent:
+		// No-op, processed by plugins if needed
+	case viewChangeResendTimerEvent:
+		if instance.activeView {
+			logger.Warningf("Replica %d had its view change resend timer expire but it's in an active view, this is benign but may indicate a bug", instance.id)
+			return nil
+		}
+		logger.Debugf("Replica %d view change resend timer expired before view change quorum was reached, resending", instance.id)
+		instance.view-- // sending the view change increments this
+		return instance.sendViewChange()
+	default:
+		logger.Warningf("Replica %d received an unknown message type %T", instance.id, et)
+	}
+
+	if err != nil {
+		logger.Warning(err.Error())
+	}
+
+	return nil
+}
+
+// =============================================================================
+// helper functions for PBFT
+// =============================================================================
+
+// Given a certain view n, what is the expected primary?
+// The primary rotates round-robin over the replica set.
+func (instance *pbftCore) primary(n uint64) uint64 {
+	replicas := uint64(instance.replicaCount)
+	return n % replicas
+}
+
+// Is the sequence number between watermarks?
+// Note: these are uint64 subtractions, so the wraparound is deliberate:
+// n == h makes the first clause false, and n < h wraps to a huge value
+// that fails the second clause — out-of-range n is rejected either way.
+func (instance *pbftCore) inW(n uint64) bool {
+	return n-instance.h > 0 && n-instance.h <= instance.L
+}
+
+// Is the view right? And is the sequence number between watermarks?
+func (instance *pbftCore) inWV(v uint64, n uint64) bool {
+	if instance.view != v {
+		return false
+	}
+	return instance.inW(n)
+}
+
+// Given a digest/view/seq, is there an entry in the certLog?
+// If so, return it. If not, create it.
+func (instance *pbftCore) getCert(v uint64, n uint64) (cert *msgCert) {
+	idx := msgID{v, n}
+	if existing, ok := instance.certStore[idx]; ok {
+		return existing
+	}
+	cert = &msgCert{}
+	instance.certStore[idx] = cert
+	return cert
+}
+
+// =============================================================================
+// preprepare/prepare/commit quorum checks
+// =============================================================================
+
+// intersectionQuorum returns the number of replicas that have to
+// agree to guarantee that at least one correct replica is shared by
+// two intersection quora
+// ((N+f+2)/2 in integer arithmetic is ceil((N+f+1)/2))
+func (instance *pbftCore) intersectionQuorum() int {
+	return (instance.N + instance.f + 2) / 2
+}
+
+// allCorrectReplicasQuorum returns the number of correct replicas (N-f)
+func (instance *pbftCore) allCorrectReplicasQuorum() int {
+	return instance.N - instance.f
+}
+
+// prePrepared reports whether digest is pre-prepared at view v / seqNo n:
+// the batch must be present in reqBatchStore (unless digest is the null
+// request ""), and either the restored Q-set or the certificate store must
+// hold a matching pre-prepare.
+func (instance *pbftCore) prePrepared(digest string, v uint64, n uint64) bool {
+	_, mInLog := instance.reqBatchStore[digest]
+
+	if digest != "" && !mInLog {
+		return false
+	}
+
+	// Q-set entries survive view changes / restarts
+	if q, ok := instance.qset[qidx{digest, n}]; ok && q.View == v {
+		return true
+	}
+
+	cert := instance.certStore[msgID{v, n}]
+	if cert != nil {
+		p := cert.prePrepare
+		if p != nil && p.View == v && p.SequenceNumber == n && p.BatchDigest == digest {
+			return true
+		}
+	}
+	logger.Debugf("Replica %d does not have view=%d/seqNo=%d pre-prepared",
+		instance.id, v, n)
+	return false
+}
+
+// prepared reports whether this replica holds a prepared certificate for
+// digest at view v / seqNo n: it must be pre-prepared and either recorded
+// in the restored P-set or backed by intersectionQuorum()-1 matching
+// prepare messages (the -1 because the pre-prepare stands in for the
+// primary's prepare).
+func (instance *pbftCore) prepared(digest string, v uint64, n uint64) bool {
+	if !instance.prePrepared(digest, v, n) {
+		return false
+	}
+
+	// P-set entries survive view changes / restarts
+	if p, ok := instance.pset[n]; ok && p.View == v && p.BatchDigest == digest {
+		return true
+	}
+
+	quorum := 0
+	cert := instance.certStore[msgID{v, n}]
+	if cert == nil {
+		return false
+	}
+
+	for _, p := range cert.prepare {
+		if p.View == v && p.SequenceNumber == n && p.BatchDigest == digest {
+			quorum++
+		}
+	}
+
+	logger.Debugf("Replica %d prepare count for view=%d/seqNo=%d: %d",
+		instance.id, v, n, quorum)
+
+	return quorum >= instance.intersectionQuorum()-1
+}
+
+// committed reports whether this replica holds a committed certificate for
+// digest at view v / seqNo n: it must be prepared and have a full
+// intersectionQuorum() of matching commit messages.
+func (instance *pbftCore) committed(digest string, v uint64, n uint64) bool {
+	if !instance.prepared(digest, v, n) {
+		return false
+	}
+
+	quorum := 0
+	cert := instance.certStore[msgID{v, n}]
+	if cert == nil {
+		return false
+	}
+
+	// Commits are matched on view/seqNo only; the digest is pinned by
+	// the prepared check above
+	for _, p := range cert.commit {
+		if p.View == v && p.SequenceNumber == n {
+			quorum++
+		}
+	}
+
+	logger.Debugf("Replica %d commit count for view=%d/seqNo=%d: %d",
+		instance.id, v, n, quorum)
+
+	return quorum >= instance.intersectionQuorum()
+}
+
+// =============================================================================
+// receive methods
+// =============================================================================
+
+// nullRequestHandler fires when the null request timer expires. A backup
+// concludes the primary has gone quiet and starts a view change; the
+// primary instead sends a null request (pre-prepare with empty digest) as
+// a keep-alive.
+func (instance *pbftCore) nullRequestHandler() {
+	if !instance.activeView {
+		return
+	}
+
+	if instance.primary(instance.view) != instance.id {
+		// backup expected a null request, but primary never sent one
+		// (was logger.Info, which does not interpret the %d verb)
+		logger.Infof("Replica %d null request timer expired, sending view change", instance.id)
+		instance.sendViewChange()
+	} else {
+		// time for the primary to send a null request
+		// pre-prepare with null digest
+		logger.Infof("Primary %d null request timer expired, sending null request", instance.id)
+		instance.sendPrePrepare(nil, "")
+	}
+}
+
+// recvMsg unwraps an incoming Message from senderID into its concrete
+// payload event. For every payload type except ReturnRequestBatch, the
+// embedded ReplicaId must match the ID of the receiving stream, which
+// prevents a replica from impersonating another.
+func (instance *pbftCore) recvMsg(msg *Message, senderID uint64) (interface{}, error) {
+	if reqBatch := msg.GetRequestBatch(); reqBatch != nil {
+		return reqBatch, nil
+	} else if preprep := msg.GetPrePrepare(); preprep != nil {
+		if senderID != preprep.ReplicaId {
+			return nil, fmt.Errorf("Sender ID included in pre-prepare message (%v) doesn't match ID corresponding to the receiving stream (%v)", preprep.ReplicaId, senderID)
+		}
+		return preprep, nil
+	} else if prep := msg.GetPrepare(); prep != nil {
+		if senderID != prep.ReplicaId {
+			return nil, fmt.Errorf("Sender ID included in prepare message (%v) doesn't match ID corresponding to the receiving stream (%v)", prep.ReplicaId, senderID)
+		}
+		return prep, nil
+	} else if commit := msg.GetCommit(); commit != nil {
+		if senderID != commit.ReplicaId {
+			return nil, fmt.Errorf("Sender ID included in commit message (%v) doesn't match ID corresponding to the receiving stream (%v)", commit.ReplicaId, senderID)
+		}
+		return commit, nil
+	} else if chkpt := msg.GetCheckpoint(); chkpt != nil {
+		if senderID != chkpt.ReplicaId {
+			return nil, fmt.Errorf("Sender ID included in checkpoint message (%v) doesn't match ID corresponding to the receiving stream (%v)", chkpt.ReplicaId, senderID)
+		}
+		return chkpt, nil
+	} else if vc := msg.GetViewChange(); vc != nil {
+		if senderID != vc.ReplicaId {
+			return nil, fmt.Errorf("Sender ID included in view-change message (%v) doesn't match ID corresponding to the receiving stream (%v)", vc.ReplicaId, senderID)
+		}
+		return vc, nil
+	} else if nv := msg.GetNewView(); nv != nil {
+		if senderID != nv.ReplicaId {
+			return nil, fmt.Errorf("Sender ID included in new-view message (%v) doesn't match ID corresponding to the receiving stream (%v)", nv.ReplicaId, senderID)
+		}
+		return nv, nil
+	} else if fr := msg.GetFetchRequestBatch(); fr != nil {
+		if senderID != fr.ReplicaId {
+			return nil, fmt.Errorf("Sender ID included in fetch-request-batch message (%v) doesn't match ID corresponding to the receiving stream (%v)", fr.ReplicaId, senderID)
+		}
+		return fr, nil
+	} else if reqBatch := msg.GetReturnRequestBatch(); reqBatch != nil {
+		// it's ok for sender ID and replica ID to differ; we're sending the original request message
+		return returnRequestBatchEvent(reqBatch), nil
+	}
+	return nil, fmt.Errorf("Invalid message: %v", msg)
+}
+
+// recvRequestBatch stores and persists an incoming request batch, starts
+// the request (progress) timer while in an active view, and, when this
+// replica is the active primary, immediately issues a pre-prepare for it.
+func (instance *pbftCore) recvRequestBatch(reqBatch *RequestBatch) error {
+	digest := hash(reqBatch)
+	logger.Debugf("Replica %d received request batch %s", instance.id, digest)
+
+	instance.reqBatchStore[digest] = reqBatch
+	instance.outstandingReqBatches[digest] = reqBatch
+	instance.persistRequestBatch(digest)
+	if instance.activeView {
+		instance.softStartTimer(instance.requestTimeout, fmt.Sprintf("new request batch %s", digest))
+	}
+	if instance.primary(instance.view) == instance.id && instance.activeView {
+		// The batch itself serves as progress; no null request needed
+		instance.nullRequestTimer.Stop()
+		instance.sendPrePrepare(reqBatch, digest)
+	} else {
+		logger.Debugf("Replica %d is backup, not sending pre-prepare for request batch %s", instance.id, digest)
+	}
+	return nil
+}
+
+// sendPrePrepare is invoked on the primary to assign the next sequence
+// number to reqBatch (identified by digest) and broadcast the pre-prepare,
+// unless a conflicting assignment already exists, the seqNo would leave
+// the usable half of the watermark window, or a view change is imminent.
+func (instance *pbftCore) sendPrePrepare(reqBatch *RequestBatch, digest string) {
+	logger.Debugf("Replica %d is primary, issuing pre-prepare for request batch %s", instance.id, digest)
+
+	n := instance.seqNo + 1
+	for _, cert := range instance.certStore { // check for other PRE-PREPARE for same digest, but different seqNo
+		if p := cert.prePrepare; p != nil {
+			if p.View == instance.view && p.SequenceNumber != n && p.BatchDigest == digest && digest != "" {
+				logger.Infof("Other pre-prepare found with same digest but different seqNo: %d instead of %d", p.SequenceNumber, n)
+				return
+			}
+		}
+	}
+
+	if !instance.inWV(instance.view, n) || n > instance.h+instance.L/2 {
+		logger.Debugf("Replica %d is primary, not sending pre-prepare for request batch %s because it is out of sequence numbers", instance.id, digest)
+		return
+	}
+
+	if n > instance.viewChangeSeqNo {
+		// Fixed: was logger.Info, which does not interpret format verbs
+		logger.Infof("Primary %d about to switch to next primary, not sending pre-prepare with seqno=%d", instance.id, n)
+		return
+	}
+
+	logger.Debugf("Primary %d broadcasting pre-prepare for view=%d/seqNo=%d and digest %s", instance.id, instance.view, n, digest)
+	instance.seqNo = n
+	preprep := &PrePrepare{
+		View:           instance.view,
+		SequenceNumber: n,
+		BatchDigest:    digest,
+		RequestBatch:   reqBatch,
+		ReplicaId:      instance.id,
+	}
+	cert := instance.getCert(instance.view, n)
+	cert.prePrepare = preprep
+	cert.digest = digest
+	instance.persistQSet()
+	instance.innerBroadcast(&Message{Payload: &Message_PrePrepare{PrePrepare: preprep}})
+	instance.maybeSendCommit(digest, instance.view, n)
+}
+
+// resubmitRequestBatches re-triggers processing, on the primary, of every
+// outstanding request batch that has not yet acquired a certificate.
+func (instance *pbftCore) resubmitRequestBatches() {
+	if instance.primary(instance.view) != instance.id {
+		return
+	}
+
+	var submissionOrder []*RequestBatch
+
+	for d, reqBatch := range instance.outstandingReqBatches {
+		certified := false
+		for _, cert := range instance.certStore {
+			if cert.digest == d {
+				certified = true
+				break
+			}
+		}
+		if certified {
+			logger.Debugf("Replica %d already has certificate for request batch %s - not going to resubmit", instance.id, d)
+			continue
+		}
+		logger.Debugf("Replica %d has detected request batch %s must be resubmitted", instance.id, d)
+		submissionOrder = append(submissionOrder, reqBatch)
+	}
+
+	if len(submissionOrder) == 0 {
+		return
+	}
+
+	for _, reqBatch := range submissionOrder {
+		// This is a request batch that has not been pre-prepared yet
+		// Trigger request batch processing again
+		instance.recvRequestBatch(reqBatch)
+	}
+}
+
+// recvPrePrepare processes a pre-prepare message: it must arrive during an
+// active view, come from the current primary, and fall inside the
+// watermark window; a digest conflicting with one already accepted for the
+// same view/seqNo triggers a view change. The embedded batch is stored and
+// persisted if not previously seen, and a backup replies by broadcasting
+// its own prepare (at most once per certificate).
+func (instance *pbftCore) recvPrePrepare(preprep *PrePrepare) error {
+	logger.Debugf("Replica %d received pre-prepare from replica %d for view=%d/seqNo=%d",
+		instance.id, preprep.ReplicaId, preprep.View, preprep.SequenceNumber)
+
+	if !instance.activeView {
+		logger.Debugf("Replica %d ignoring pre-prepare as we are in a view change", instance.id)
+		return nil
+	}
+
+	if instance.primary(instance.view) != preprep.ReplicaId {
+		logger.Warningf("Pre-prepare from other than primary: got %d, should be %d", preprep.ReplicaId, instance.primary(instance.view))
+		return nil
+	}
+
+	if !instance.inWV(preprep.View, preprep.SequenceNumber) {
+		if preprep.SequenceNumber != instance.h && !instance.skipInProgress {
+			logger.Warningf("Replica %d pre-prepare view different, or sequence number outside watermarks: preprep.View %d, expected.View %d, seqNo %d, low-mark %d", instance.id, preprep.View, instance.primary(instance.view), preprep.SequenceNumber, instance.h)
+		} else {
+			// This is perfectly normal
+			logger.Debugf("Replica %d pre-prepare view different, or sequence number outside watermarks: preprep.View %d, expected.View %d, seqNo %d, low-mark %d", instance.id, preprep.View, instance.primary(instance.view), preprep.SequenceNumber, instance.h)
+		}
+
+		return nil
+	}
+
+	if preprep.SequenceNumber > instance.viewChangeSeqNo {
+		// Fixed: was logger.Info, which does not interpret format verbs
+		logger.Infof("Replica %d received pre-prepare for %d, which should be from the next primary", instance.id, preprep.SequenceNumber)
+		instance.sendViewChange()
+		return nil
+	}
+
+	cert := instance.getCert(preprep.View, preprep.SequenceNumber)
+	if cert.digest != "" && cert.digest != preprep.BatchDigest {
+		// Equivocation by the primary: two digests for one slot
+		logger.Warningf("Pre-prepare found for same view/seqNo but different digest: received %s, stored %s", preprep.BatchDigest, cert.digest)
+		instance.sendViewChange()
+		return nil
+	}
+
+	cert.prePrepare = preprep
+	cert.digest = preprep.BatchDigest
+
+	// Store the request batch if, for whatever reason, we haven't received it from an earlier broadcast
+	if _, ok := instance.reqBatchStore[preprep.BatchDigest]; !ok && preprep.BatchDigest != "" {
+		digest := hash(preprep.GetRequestBatch())
+		if digest != preprep.BatchDigest {
+			logger.Warningf("Pre-prepare and request digest do not match: request %s, digest %s", digest, preprep.BatchDigest)
+			return nil
+		}
+		instance.reqBatchStore[digest] = preprep.GetRequestBatch()
+		logger.Debugf("Replica %d storing request batch %s in outstanding request batch store", instance.id, digest)
+		instance.outstandingReqBatches[digest] = preprep.GetRequestBatch()
+		instance.persistRequestBatch(digest)
+	}
+
+	instance.softStartTimer(instance.requestTimeout, fmt.Sprintf("new pre-prepare for request batch %s", preprep.BatchDigest))
+	instance.nullRequestTimer.Stop()
+
+	if instance.primary(instance.view) != instance.id && instance.prePrepared(preprep.BatchDigest, preprep.View, preprep.SequenceNumber) && !cert.sentPrepare {
+		logger.Debugf("Backup %d broadcasting prepare for view=%d/seqNo=%d", instance.id, preprep.View, preprep.SequenceNumber)
+		prep := &Prepare{
+			View:           preprep.View,
+			SequenceNumber: preprep.SequenceNumber,
+			BatchDigest:    preprep.BatchDigest,
+			ReplicaId:      instance.id,
+		}
+		cert.sentPrepare = true
+		instance.persistQSet()
+		// Deliver our own prepare locally before broadcasting
+		instance.recvPrepare(prep)
+		return instance.innerBroadcast(&Message{Payload: &Message_Prepare{Prepare: prep}})
+	}
+
+	return nil
+}
+
+// recvPrepare validates a prepare message — it must not originate from
+// the view's primary (whose pre-prepare counts instead) and must fall in
+// the watermark window — records it de-duplicated per replica, persists
+// the P-set, and sends our commit if the prepared predicate now holds.
+func (instance *pbftCore) recvPrepare(prep *Prepare) error {
+	logger.Debugf("Replica %d received prepare from replica %d for view=%d/seqNo=%d",
+		instance.id, prep.ReplicaId, prep.View, prep.SequenceNumber)
+
+	if instance.primary(prep.View) == prep.ReplicaId {
+		logger.Warningf("Replica %d received prepare from primary, ignoring", instance.id)
+		return nil
+	}
+
+	if !instance.inWV(prep.View, prep.SequenceNumber) {
+		if prep.SequenceNumber != instance.h && !instance.skipInProgress {
+			logger.Warningf("Replica %d ignoring prepare for view=%d/seqNo=%d: not in-wv, in view %d, low water mark %d", instance.id, prep.View, prep.SequenceNumber, instance.view, instance.h)
+		} else {
+			// This is perfectly normal
+			logger.Debugf("Replica %d ignoring prepare for view=%d/seqNo=%d: not in-wv, in view %d, low water mark %d", instance.id, prep.View, prep.SequenceNumber, instance.view, instance.h)
+		}
+		return nil
+	}
+
+	cert := instance.getCert(prep.View, prep.SequenceNumber)
+
+	for _, prevPrep := range cert.prepare {
+		if prevPrep.ReplicaId == prep.ReplicaId {
+			logger.Warningf("Ignoring duplicate prepare from %d", prep.ReplicaId)
+			return nil
+		}
+	}
+	cert.prepare = append(cert.prepare, prep)
+	instance.persistPSet()
+
+	return instance.maybeSendCommit(prep.BatchDigest, prep.View, prep.SequenceNumber)
+}
+
+// maybeSendCommit broadcasts our commit for digest at view v / seqNo n
+// exactly once, as soon as the prepared predicate holds; otherwise it is
+// a no-op.
+func (instance *pbftCore) maybeSendCommit(digest string, v uint64, n uint64) error {
+	cert := instance.getCert(v, n)
+	if instance.prepared(digest, v, n) && !cert.sentCommit {
+		logger.Debugf("Replica %d broadcasting commit for view=%d/seqNo=%d",
+			instance.id, v, n)
+		commit := &Commit{
+			View:           v,
+			SequenceNumber: n,
+			BatchDigest:    digest,
+			ReplicaId:      instance.id,
+		}
+		cert.sentCommit = true
+		// Deliver our own commit locally, then broadcast. The Message
+		// literal now uses keyed fields, consistent with the other
+		// Message constructions in this file.
+		instance.recvCommit(commit)
+		return instance.innerBroadcast(&Message{Payload: &Message_Commit{Commit: commit}})
+	}
+	return nil
+}
+
+// recvCommit records a commit message (in-window, de-duplicated per
+// replica). Once the committed predicate holds, the progress timer is
+// stopped, the batch is cleared from the outstanding set, and execution is
+// attempted; reaching viewChangeSeqNo triggers the periodic view change.
+func (instance *pbftCore) recvCommit(commit *Commit) error {
+	logger.Debugf("Replica %d received commit from replica %d for view=%d/seqNo=%d",
+		instance.id, commit.ReplicaId, commit.View, commit.SequenceNumber)
+
+	if !instance.inWV(commit.View, commit.SequenceNumber) {
+		if commit.SequenceNumber != instance.h && !instance.skipInProgress {
+			// instance.h is the low watermark; the message previously said
+			// "high water mark" (cf. recvPrepare, which says "low")
+			logger.Warningf("Replica %d ignoring commit for view=%d/seqNo=%d: not in-wv, in view %d, low water mark %d", instance.id, commit.View, commit.SequenceNumber, instance.view, instance.h)
+		} else {
+			// This is perfectly normal
+			logger.Debugf("Replica %d ignoring commit for view=%d/seqNo=%d: not in-wv, in view %d, low water mark %d", instance.id, commit.View, commit.SequenceNumber, instance.view, instance.h)
+		}
+		return nil
+	}
+
+	cert := instance.getCert(commit.View, commit.SequenceNumber)
+	for _, prevCommit := range cert.commit {
+		if prevCommit.ReplicaId == commit.ReplicaId {
+			logger.Warningf("Ignoring duplicate commit from %d", commit.ReplicaId)
+			return nil
+		}
+	}
+	cert.commit = append(cert.commit, commit)
+
+	if instance.committed(commit.BatchDigest, commit.View, commit.SequenceNumber) {
+		instance.stopTimer()
+		instance.lastNewViewTimeout = instance.newViewTimeout
+		delete(instance.outstandingReqBatches, commit.BatchDigest)
+
+		instance.executeOutstanding()
+
+		if commit.SequenceNumber == instance.viewChangeSeqNo {
+			logger.Infof("Replica %d cycling view for seqNo=%d", instance.id, commit.SequenceNumber)
+			instance.sendViewChange()
+		}
+	}
+
+	return nil
+}
+
+// updateHighStateTarget records target as the highest observed state
+// transfer target, unless an equal-or-higher target is already known.
+func (instance *pbftCore) updateHighStateTarget(target *stateUpdateTarget) {
+	current := instance.highStateTarget
+	if current != nil && current.seqNo >= target.seqNo {
+		logger.Debugf("Replica %d not updating state target to seqNo %d, has target for seqNo %d", instance.id, target.seqNo, current.seqNo)
+		return
+	}
+	instance.highStateTarget = target
+}
+
+// stateTransfer flags the replica as fallen behind — invalidating the
+// consumer's state on the first detection — then attempts the transfer.
+func (instance *pbftCore) stateTransfer(optional *stateUpdateTarget) {
+	if !instance.skipInProgress {
+		logger.Debugf("Replica %d is out of sync, pending state transfer", instance.id)
+		instance.skipInProgress = true
+		instance.consumer.invalidateState()
+	}
+	instance.retryStateTransfer(optional)
+}
+
+// retryStateTransfer starts a state transfer to the given target, or to the
+// highest known target when optional is nil. It is a no-op while an execution
+// or another transfer is in flight, or when no target is known yet; callers
+// re-invoke it when those conditions clear.
+func (instance *pbftCore) retryStateTransfer(optional *stateUpdateTarget) {
+	if instance.currentExec != nil {
+		logger.Debugf("Replica %d is currently mid-execution, it must wait for the execution to complete before performing state transfer", instance.id)
+		return
+	}
+
+	if instance.stateTransferring {
+		logger.Debugf("Replica %d is currently mid state transfer, it must wait for this state transfer to complete before initiating a new one", instance.id)
+		return
+	}
+
+	target := optional
+	if target == nil {
+		if instance.highStateTarget == nil {
+			logger.Debugf("Replica %d has no targets to attempt state transfer to, delaying", instance.id)
+			return
+		}
+		target = instance.highStateTarget
+	}
+
+	// Flag must be set before skipTo, whose completion event clears it.
+	instance.stateTransferring = true
+
+	logger.Debugf("Replica %d is initiating state transfer to seqNo %d", instance.id, target.seqNo)
+	instance.consumer.skipTo(target.seqNo, target.id, target.replicas)
+
+}
+
+// executeOutstanding scans the certificate store for the next committed batch
+// (seqNo lastExec+1) and executes it. At most one execution is started per
+// call — executeOne returning true breaks the scan, since execution proceeds
+// strictly in sequence-number order.
+func (instance *pbftCore) executeOutstanding() {
+	if instance.currentExec != nil {
+		logger.Debugf("Replica %d not attempting to executeOutstanding because it is currently executing %d", instance.id, *instance.currentExec)
+		return
+	}
+	logger.Debugf("Replica %d attempting to executeOutstanding", instance.id)
+
+	// Map iteration order is irrelevant: executeOne only fires for lastExec+1.
+	for idx := range instance.certStore {
+		if instance.executeOne(idx) {
+			break
+		}
+	}
+
+	logger.Debugf("Replica %d certstore %+v", instance.id, instance.certStore)
+
+	instance.startTimerIfOutstandingRequests()
+}
+
+func (instance *pbftCore) executeOne(idx msgID) bool {
+ cert := instance.certStore[idx]
+
+ if idx.n != instance.lastExec+1 || cert == nil || cert.prePrepare == nil {
+ return false
+ }
+
+ if instance.skipInProgress {
+ logger.Debugf("Replica %d currently picking a starting point to resume, will not execute", instance.id)
+ return false
+ }
+
+ // we now have the right sequence number that doesn't create holes
+
+ digest := cert.digest
+ reqBatch := instance.reqBatchStore[digest]
+
+ if !instance.committed(digest, idx.v, idx.n) {
+ return false
+ }
+
+ // we have a commit certificate for this request batch
+ currentExec := idx.n
+ instance.currentExec = ¤tExec
+
+ // null request
+ if digest == "" {
+ logger.Infof("Replica %d executing/committing null request for view=%d/seqNo=%d",
+ instance.id, idx.v, idx.n)
+ instance.execDoneSync()
+ } else {
+ logger.Infof("Replica %d executing/committing request batch for view=%d/seqNo=%d and digest %s",
+ instance.id, idx.v, idx.n, digest)
+ // synchronously execute, it is the other side's responsibility to execute in the background if needed
+ instance.consumer.execute(idx.n, reqBatch)
+ }
+ return true
+}
+
+// Checkpoint records a local checkpoint for seqNo with snapshot id, persists
+// it, feeds it to our own recvCheckpoint (counting toward our own quorum), and
+// broadcasts it. seqNo must fall on a checkpoint-interval (K) boundary.
+func (instance *pbftCore) Checkpoint(seqNo uint64, id []byte) {
+	if seqNo%instance.K != 0 {
+		logger.Errorf("Attempted to checkpoint a sequence number (%d) which is not a multiple of the checkpoint interval (%d)", seqNo, instance.K)
+		return
+	}
+
+	// Ids travel in protobuf string fields, so the raw bytes are base64-encoded.
+	idAsString := base64.StdEncoding.EncodeToString(id)
+
+	logger.Debugf("Replica %d preparing checkpoint for view=%d/seqNo=%d and b64 id of %s",
+		instance.id, instance.view, seqNo, idAsString)
+
+	chkpt := &Checkpoint{
+		SequenceNumber: seqNo,
+		ReplicaId:      instance.id,
+		Id:             idAsString,
+	}
+	instance.chkpts[seqNo] = idAsString
+
+	instance.persistCheckpoint(seqNo, id)
+	instance.recvCheckpoint(chkpt)
+	instance.innerBroadcast(&Message{Payload: &Message_Checkpoint{Checkpoint: chkpt}})
+}
+
+// execDoneSync finalizes the execution recorded in currentExec: it advances
+// lastExec, emits a checkpoint on K boundaries, clears currentExec, and kicks
+// off the next pending execution.
+func (instance *pbftCore) execDoneSync() {
+	if instance.currentExec != nil {
+		logger.Infof("Replica %d finished execution %d, trying next", instance.id, *instance.currentExec)
+		instance.lastExec = *instance.currentExec
+		if instance.lastExec%instance.K == 0 {
+			instance.Checkpoint(instance.lastExec, instance.consumer.getState())
+		}
+
+	} else {
+		// XXX This masks a bug, this should not be called when currentExec is nil
+		logger.Warningf("Replica %d had execDoneSync called, flagging ourselves as out of date", instance.id)
+		instance.skipInProgress = true
+	}
+	instance.currentExec = nil
+
+	instance.executeOutstanding()
+}
+
+// moveWatermarks advances the low watermark h to n rounded down to the nearest
+// checkpoint boundary, garbage-collecting all per-sequence state (certs,
+// request batches, checkpoint messages, p/q sets, own checkpoints) that falls
+// at or below the new watermark, then resubmits any outstanding batches.
+func (instance *pbftCore) moveWatermarks(n uint64) {
+	// round down n to previous low watermark
+	h := n / instance.K * instance.K
+
+	for idx, cert := range instance.certStore {
+		if idx.n <= h {
+			logger.Debugf("Replica %d cleaning quorum certificate for view=%d/seqNo=%d",
+				instance.id, idx.v, idx.n)
+			instance.persistDelRequestBatch(cert.digest)
+			delete(instance.reqBatchStore, cert.digest)
+			delete(instance.certStore, idx)
+		}
+	}
+
+	for testChkpt := range instance.checkpointStore {
+		if testChkpt.SequenceNumber <= h {
+			logger.Debugf("Replica %d cleaning checkpoint message from replica %d, seqNo %d, b64 snapshot id %s",
+				instance.id, testChkpt.ReplicaId, testChkpt.SequenceNumber, testChkpt.Id)
+			delete(instance.checkpointStore, testChkpt)
+		}
+	}
+
+	for n := range instance.pset {
+		if n <= h {
+			delete(instance.pset, n)
+		}
+	}
+
+	for idx := range instance.qset {
+		if idx.n <= h {
+			delete(instance.qset, idx)
+		}
+	}
+
+	// NOTE: strictly "< h" (not "<=") — our own checkpoint AT the new low
+	// watermark is retained, since it anchors the log.
+	for n := range instance.chkpts {
+		if n < h {
+			delete(instance.chkpts, n)
+			instance.persistDelCheckpoint(n)
+		}
+	}
+
+	instance.h = h
+
+	logger.Debugf("Replica %d updated low watermark to %d",
+		instance.id, instance.h)
+
+	instance.resubmitRequestBatches()
+}
+
+// weakCheckpointSetOutOfRange detects whether this replica has fallen behind
+// the network: it tracks, per replica, checkpoints reported above our high
+// watermark H, and once f+1 replicas agree on some seqNo above H, it discards
+// local request state, moves the watermarks, and flags a state transfer.
+// Returns true when the replica was found out of date (chkpt consumed).
+func (instance *pbftCore) weakCheckpointSetOutOfRange(chkpt *Checkpoint) bool {
+	H := instance.h + instance.L
+
+	// Track the last observed checkpoint sequence number if it exceeds our high watermark, keyed by replica to prevent unbounded growth
+	if chkpt.SequenceNumber < H {
+		// For non-byzantine nodes, the checkpoint sequence number increases monotonically
+		delete(instance.hChkpts, chkpt.ReplicaId)
+	} else {
+		// We do not track the highest one, as a byzantine node could pick an arbitrarilly high sequence number
+		// and even if it recovered to be non-byzantine, we would still believe it to be far ahead
+		instance.hChkpts[chkpt.ReplicaId] = chkpt.SequenceNumber
+
+		// If f+1 other replicas have reported checkpoints that were (at one time) outside our watermarks
+		// we need to check to see if we have fallen behind.
+		if len(instance.hChkpts) >= instance.f+1 {
+			chkptSeqNumArray := make([]uint64, len(instance.hChkpts))
+			index := 0
+			for replicaID, hChkpt := range instance.hChkpts {
+				chkptSeqNumArray[index] = hChkpt
+				index++
+				if hChkpt < H {
+					delete(instance.hChkpts, replicaID)
+				}
+			}
+			sort.Sort(sortableUint64Slice(chkptSeqNumArray))
+
+			// If f+1 nodes have issued checkpoints above our high water mark, then
+			// we will never record 2f+1 checkpoints for that sequence number, we are out of date
+			// (This is because all_replicas - missed - me = 3f+1 - f - 1 = 2f)
+			if m := chkptSeqNumArray[len(chkptSeqNumArray)-(instance.f+1)]; m > H {
+				logger.Warningf("Replica %d is out of date, f+1 nodes agree checkpoint with seqNo %d exists but our high water mark is %d", instance.id, chkpt.SequenceNumber, H)
+				instance.reqBatchStore = make(map[string]*RequestBatch) // Discard all our requests, as we will never know which were executed, to be addressed in #394
+				instance.persistDelAllRequestBatches()
+				instance.moveWatermarks(m)
+				instance.outstandingReqBatches = make(map[string]*RequestBatch)
+				instance.skipInProgress = true
+				instance.consumer.invalidateState()
+				instance.stopTimer()
+
+				// TODO, reprocess the already gathered checkpoints, this will make recovery faster, though it is presently correct
+
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// witnessCheckpointWeakCert is invoked exactly once per checkpoint, when the
+// f+1-th matching checkpoint message arrives (a "weak certificate"). It
+// records the attesting replicas as a state-transfer target and, if the
+// replica is already catching up, retries the transfer toward it.
+func (instance *pbftCore) witnessCheckpointWeakCert(chkpt *Checkpoint) {
+	checkpointMembers := make([]uint64, instance.f+1) // Only ever invoked for the first weak cert, so guaranteed to be f+1
+	i := 0
+	for testChkpt := range instance.checkpointStore {
+		if testChkpt.SequenceNumber == chkpt.SequenceNumber && testChkpt.Id == chkpt.Id {
+			checkpointMembers[i] = testChkpt.ReplicaId
+			logger.Debugf("Replica %d adding replica %d (handle %v) to weak cert", instance.id, testChkpt.ReplicaId, checkpointMembers[i])
+			i++
+		}
+	}
+
+	// Checkpoint ids are transported base64-encoded; decode back to raw bytes.
+	snapshotID, err := base64.StdEncoding.DecodeString(chkpt.Id)
+	if nil != err {
+		err = fmt.Errorf("Replica %d received a weak checkpoint cert which could not be decoded (%s)", instance.id, chkpt.Id)
+		logger.Error(err.Error())
+		return
+	}
+
+	target := &stateUpdateTarget{
+		checkpointMessage: checkpointMessage{
+			seqNo: chkpt.SequenceNumber,
+			id:    snapshotID,
+		},
+		replicas: checkpointMembers,
+	}
+	instance.updateHighStateTarget(target)
+
+	if instance.skipInProgress {
+		logger.Debugf("Replica %d is catching up and witnessed a weak certificate for checkpoint %d, weak cert attested to by %d of %d (%v)",
+			instance.id, chkpt.SequenceNumber, i, instance.replicaCount, checkpointMembers)
+		// The view should not be set to active, this should be handled by the yet unimplemented SUSPECT, see https://github.com/hyperledger/fabric/issues/1120
+		instance.retryStateTransfer(target)
+	}
+}
+
+// recvCheckpoint processes a checkpoint message from a peer (or from ourselves
+// via Checkpoint). It handles the out-of-range/fallen-behind case, stores the
+// message, fires the weak-cert hook at exactly f+1 matches, and on reaching a
+// quorum — provided we have produced the same checkpoint locally — moves the
+// watermarks and re-attempts any pending new-view processing.
+func (instance *pbftCore) recvCheckpoint(chkpt *Checkpoint) events.Event {
+	logger.Debugf("Replica %d received checkpoint from replica %d, seqNo %d, digest %s",
+		instance.id, chkpt.ReplicaId, chkpt.SequenceNumber, chkpt.Id)
+
+	if instance.weakCheckpointSetOutOfRange(chkpt) {
+		return nil
+	}
+
+	if !instance.inW(chkpt.SequenceNumber) {
+		if chkpt.SequenceNumber != instance.h && !instance.skipInProgress {
+			// It is perfectly normal that we receive checkpoints for the watermark we just raised, as we raise it after 2f+1, leaving f replies left
+			logger.Warningf("Checkpoint sequence number outside watermarks: seqNo %d, low-mark %d", chkpt.SequenceNumber, instance.h)
+		} else {
+			logger.Debugf("Checkpoint sequence number outside watermarks: seqNo %d, low-mark %d", chkpt.SequenceNumber, instance.h)
+		}
+		return nil
+	}
+
+	instance.checkpointStore[*chkpt] = true
+
+	// Count messages matching both seqNo and snapshot id; senders are distinct
+	// because the full struct (including ReplicaId) is the map key.
+	matching := 0
+	for testChkpt := range instance.checkpointStore {
+		if testChkpt.SequenceNumber == chkpt.SequenceNumber && testChkpt.Id == chkpt.Id {
+			matching++
+		}
+	}
+	logger.Debugf("Replica %d found %d matching checkpoints for seqNo %d, digest %s",
+		instance.id, matching, chkpt.SequenceNumber, chkpt.Id)
+
+	if matching == instance.f+1 {
+		// We do have a weak cert
+		instance.witnessCheckpointWeakCert(chkpt)
+	}
+
+	if matching < instance.intersectionQuorum() {
+		// We do not have a quorum yet
+		return nil
+	}
+
+	// It is actually just fine if we do not have this checkpoint
+	// and should not trigger a state transfer
+	// Imagine we are executing sequence number k-1 and we are slow for some reason
+	// then everyone else finishes executing k, and we receive a checkpoint quorum
+	// which we will agree with very shortly, but do not move our watermarks until
+	// we have reached this checkpoint
+	// Note, this is not divergent from the paper, as the paper requires that
+	// the quorum certificate must contain 2f+1 messages, including its own
+	chkptID, ok := instance.chkpts[chkpt.SequenceNumber]
+	if !ok {
+		logger.Debugf("Replica %d found checkpoint quorum for seqNo %d, digest %s, but it has not reached this checkpoint itself yet",
+			instance.id, chkpt.SequenceNumber, chkpt.Id)
+		if instance.skipInProgress {
+			logSafetyBound := instance.h + instance.L/2
+			// As an optimization, if we are more than half way out of our log and in state transfer, move our watermarks so we don't lose track of the network
+			// if needed, state transfer will restart on completion to a more recent point in time
+			if chkpt.SequenceNumber >= logSafetyBound {
+				logger.Debugf("Replica %d is in state transfer, but, the network seems to be moving on past %d, moving our watermarks to stay with it", instance.id, logSafetyBound)
+				instance.moveWatermarks(chkpt.SequenceNumber)
+			}
+		}
+		return nil
+	}
+
+	logger.Debugf("Replica %d found checkpoint quorum for seqNo %d, digest %s",
+		instance.id, chkpt.SequenceNumber, chkpt.Id)
+
+	if chkptID != chkpt.Id {
+		logger.Criticalf("Replica %d generated a checkpoint of %s, but a quorum of the network agrees on %s. This is almost definitely non-deterministic chaincode.",
+			instance.id, chkptID, chkpt.Id)
+		instance.stateTransfer(nil)
+	}
+
+	instance.moveWatermarks(chkpt.SequenceNumber)
+
+	return instance.processNewView()
+}
+
+// used in view-change to fetch missing assigned, non-checkpointed requests
+func (instance *pbftCore) fetchRequestBatches() (err error) {
+ var msg *Message
+ for digest := range instance.missingReqBatches {
+ msg = &Message{Payload: &Message_FetchRequestBatch{FetchRequestBatch: &FetchRequestBatch{
+ BatchDigest: digest,
+ ReplicaId: instance.id,
+ }}}
+ instance.innerBroadcast(msg)
+ }
+
+ return
+}
+
+// recvFetchRequestBatch answers a peer's request for a batch it is missing
+// during view change: if we hold the batch it is marshalled and unicast back
+// to the requester; if not, the request is silently ignored.
+func (instance *pbftCore) recvFetchRequestBatch(fr *FetchRequestBatch) (err error) {
+	digest := fr.BatchDigest
+	// Single comma-ok lookup (previously the map was probed twice).
+	reqBatch, ok := instance.reqBatchStore[digest]
+	if !ok {
+		return nil // we don't have it either
+	}
+
+	msg := &Message{Payload: &Message_ReturnRequestBatch{ReturnRequestBatch: reqBatch}}
+	msgPacked, err := proto.Marshal(msg)
+	if err != nil {
+		return fmt.Errorf("Error marshalling return-request-batch message: %v", err)
+	}
+
+	receiver := fr.ReplicaId
+	err = instance.consumer.unicast(msgPacked, receiver)
+
+	return
+}
+
+// recvReturnRequestBatch handles a batch returned by a peer in response to a
+// fetch. Batches we no longer consider missing (wrong digest, or already
+// received) are dropped; otherwise the batch is stored, persisted, and
+// new-view processing is re-attempted.
+func (instance *pbftCore) recvReturnRequestBatch(reqBatch *RequestBatch) events.Event {
+	digest := hash(reqBatch)
+	if _, ok := instance.missingReqBatches[digest]; !ok {
+		return nil // either the wrong digest, or we got it already from someone else
+	}
+	instance.reqBatchStore[digest] = reqBatch
+	delete(instance.missingReqBatches, digest)
+	instance.persistRequestBatch(digest)
+	return instance.processNewView()
+}
+
+// =============================================================================
+// Misc. methods go here
+// =============================================================================
+
+// innerBroadcast marshals a Message and hands it to the consumer stack for
+// broadcast. (NOTE(review): the old comment mentioned a toSelf flag and local
+// redelivery — this function has no such parameter; local delivery happens via
+// explicit recv* calls elsewhere.) When configured byzantine, roughly 1/3 of
+// the time the message is instead unicast to all replicas except one randomly
+// chosen victim, to exercise fault handling in tests.
+func (instance *pbftCore) innerBroadcast(msg *Message) error {
+	msgRaw, err := proto.Marshal(msg)
+	if err != nil {
+		return fmt.Errorf("Cannot marshal message %s", err)
+	}
+
+	doByzantine := false
+	if instance.byzantine {
+		rand1 := rand.New(rand.NewSource(time.Now().UnixNano()))
+		doIt := rand1.Intn(3) // go byzantine about 1/3 of the time
+		if doIt == 1 {
+			doByzantine = true
+		}
+	}
+
+	// testing byzantine fault.
+	if doByzantine {
+		rand2 := rand.New(rand.NewSource(time.Now().UnixNano()))
+		ignoreidx := rand2.Intn(instance.N)
+		for i := 0; i < instance.N; i++ {
+			if i != ignoreidx && uint64(i) != instance.id { //Pick a random replica and do not send message
+				instance.consumer.unicast(msgRaw, uint64(i))
+			} else {
+				logger.Debugf("PBFT byzantine: not broadcasting to replica %v", i)
+			}
+		}
+	} else {
+		instance.consumer.broadcast(msgRaw)
+	}
+	return nil
+}
+
+// updateViewChangeSeqNo schedules the next automatic view change: it lands
+// viewChangePeriod checkpoint intervals ahead of the current seqNo, rounded
+// onto a checkpoint (K) boundary. A viewChangePeriod of 0 disables the
+// mechanism entirely.
+func (instance *pbftCore) updateViewChangeSeqNo() {
+	if instance.viewChangePeriod <= 0 {
+		return
+	}
+	// Ensure the view change always occurs at a checkpoint boundary
+	instance.viewChangeSeqNo = instance.seqNo + instance.viewChangePeriod*instance.K - instance.seqNo%instance.K
+	logger.Debugf("Replica %d updating view change sequence number to %d", instance.id, instance.viewChangeSeqNo)
+}
+
+// startTimerIfOutstandingRequests (re)arms the appropriate liveness timer:
+// the view-change timer when request batches are still outstanding, or the
+// null-request timer when idle. Skipped while executing or state-transferring,
+// since those can legitimately take arbitrarily long.
+func (instance *pbftCore) startTimerIfOutstandingRequests() {
+	if instance.skipInProgress || instance.currentExec != nil {
+		// Do not start the view change timer if we are executing or state transferring, these take arbitrarilly long amounts of time
+		return
+	}
+
+	if len(instance.outstandingReqBatches) > 0 {
+		// Immediately-invoked literal: collects the outstanding digests purely
+		// for the timer's human-readable reason string.
+		getOutstandingDigests := func() []string {
+			var digests []string
+			for digest := range instance.outstandingReqBatches {
+				digests = append(digests, digest)
+			}
+			return digests
+		}()
+		instance.softStartTimer(instance.requestTimeout, fmt.Sprintf("outstanding request batches %v", getOutstandingDigests))
+	} else if instance.nullRequestTimeout > 0 {
+		timeout := instance.nullRequestTimeout
+		if instance.primary(instance.view) != instance.id {
+			// we're waiting for the primary to deliver a null request - give it a bit more time
+			timeout += instance.requestTimeout
+		}
+		instance.nullRequestTimer.Reset(timeout, nullRequestEvent{})
+	}
+}
+
+// softStartTimer arms the new-view timer only if it is not already running
+// (SoftReset), recording the human-readable reason for diagnostics.
+func (instance *pbftCore) softStartTimer(timeout time.Duration, reason string) {
+	logger.Debugf("Replica %d soft starting new view timer for %s: %s", instance.id, timeout, reason)
+	instance.newViewTimerReason = reason
+	instance.timerActive = true
+	instance.newViewTimer.SoftReset(timeout, viewChangeTimerEvent{})
+}
+
+// startTimer unconditionally (re)arms the new-view timer with the given timeout.
+func (instance *pbftCore) startTimer(timeout time.Duration, reason string) {
+	logger.Debugf("Replica %d starting new view timer for %s: %s", instance.id, timeout, reason)
+	instance.timerActive = true
+	instance.newViewTimer.Reset(timeout, viewChangeTimerEvent{})
+}
+
+// stopTimer halts the new-view timer and marks it inactive.
+func (instance *pbftCore) stopTimer() {
+	logger.Debugf("Replica %d stopping a running new view timer", instance.id)
+	instance.timerActive = false
+	instance.newViewTimer.Stop()
+}
diff --git a/consensus/pbft/pbft-core_mock_test.go b/consensus/pbft/pbft-core_mock_test.go
new file mode 100644
index 00000000000..df5ef51cd3c
--- /dev/null
+++ b/consensus/pbft/pbft-core_mock_test.go
@@ -0,0 +1,185 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/spf13/viper"
+
+ "github.com/hyperledger/fabric/consensus/util/events"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// pbftEndpoint is a test-network endpoint wrapping a pbftCore instance
+// together with its event manager and the mock consumer that records what
+// the instance executes.
+type pbftEndpoint struct {
+	*testEndpoint
+	pbft    *pbftCore
+	sc      *simpleConsumer
+	manager events.Manager
+}
+
+// deliver unmarshals a raw network payload into a pbft Message and queues it
+// on the endpoint's event manager, attributed to the sending validator.
+// Panics on unmarshalable payloads — a test bug, not a runtime condition.
+func (pe *pbftEndpoint) deliver(msgPayload []byte, senderHandle *pb.PeerID) {
+	senderID, _ := getValidatorID(senderHandle)
+	msg := &Message{}
+	err := proto.Unmarshal(msgPayload, msg)
+	if err != nil {
+		panic("Asked to deliver something which did not unmarshal")
+	}
+
+	pe.manager.Queue() <- &pbftMessage{msg: msg, sender: senderID}
+}
+
+// stop shuts down the wrapped pbftCore instance.
+func (pe *pbftEndpoint) stop() {
+	pe.pbft.close()
+}
+
+// isBusy reports whether the endpoint still has work in flight: a running
+// timer, an ongoing execution, or an event-manager thread that is not idle.
+// Used by the test network to decide when message processing has quiesced.
+func (pe *pbftEndpoint) isBusy() bool {
+	if pe.pbft.timerActive || pe.pbft.currentExec != nil {
+		pe.net.debugMsg("TEST: Returning as busy because timer active (%v) or current exec (%v)\n", pe.pbft.timerActive, pe.pbft.currentExec)
+		return true
+	}
+
+	// TODO, this looks racey, but seems fine, because the message send is on an unbuffered
+	// channel, the send blocks until the thread has picked up the new work, still
+	// this will be removed pending the transition to an externally driven state machine
+	select {
+	case pe.manager.Queue() <- nil:
+	default:
+		pe.net.debugMsg("TEST: Returning as busy no reply on idleChan\n")
+		return true
+	}
+
+	return false
+}
+
+// pbftNetwork is a test network of pbftEndpoints layered on the generic testnet.
+type pbftNetwork struct {
+	*testnet
+	pbftEndpoints []*pbftEndpoint
+}
+
+// simpleConsumer is the mock consumer stack handed to pbftCore in tests. It
+// records executions (count, last seqNo, last batch hash) and whether a
+// state-transfer skip occurred, so tests can assert on replica behavior.
+type simpleConsumer struct {
+	pe           *pbftEndpoint
+	pbftNet      *pbftNetwork
+	executions   uint64
+	lastSeqNo    uint64
+	skipOccurred bool
+	lastExecution string
+	mockPersist
+}
+
+// broadcast sends the payload to all validators via the test endpoint.
+func (sc *simpleConsumer) broadcast(msgPayload []byte) {
+	sc.pe.Broadcast(&pb.Message{Payload: msgPayload}, pb.PeerEndpoint_VALIDATOR)
+}
+
+// unicast sends the payload to a single validator, resolving its numeric ID
+// to a peer handle first.
+func (sc *simpleConsumer) unicast(msgPayload []byte, receiverID uint64) error {
+	handle, err := getValidatorHandle(receiverID)
+	if nil != err {
+		return err
+	}
+	sc.pe.Unicast(&pb.Message{Payload: msgPayload}, handle)
+	return nil
+}
+
+// Close is a no-op; the mock holds no resources.
+func (sc *simpleConsumer) Close() {
+	// No-op
+}
+
+// sign is an identity stub: signatures are not exercised in these tests.
+func (sc *simpleConsumer) sign(msg []byte) ([]byte, error) {
+	return msg, nil
+}
+
+// verify accepts every signature (stub).
+func (sc *simpleConsumer) verify(senderID uint64, signature []byte, message []byte) error {
+	return nil
+}
+
+// viewChange is a no-op notification stub.
+func (sc *simpleConsumer) viewChange(curView uint64) {
+}
+
+// invalidateState / validateState are no-op state-flag stubs.
+func (sc *simpleConsumer) invalidateState() {}
+func (sc *simpleConsumer) validateState()   {}
+
+// skipTo simulates an instant state transfer: it jumps the execution counter
+// to seqNo, records that a skip happened, and asynchronously queues the
+// stateUpdatedEvent the pbft core expects on transfer completion.
+func (sc *simpleConsumer) skipTo(seqNo uint64, id []byte, replicas []uint64) {
+	sc.skipOccurred = true
+	sc.executions = seqNo
+	// Queued from a goroutine so skipTo does not deadlock against the
+	// event-manager thread that invoked it.
+	go func() {
+		sc.pe.manager.Queue() <- stateUpdatedEvent{
+			chkpt: &checkpointMessage{
+				seqNo: seqNo,
+				id:    id,
+			},
+			target: &pb.BlockchainInfo{},
+		}
+	}()
+	sc.pbftNet.debugMsg("TEST: skipping to %d\n", seqNo)
+}
+
+// execute records each request in the batch (hash, count, seqNo) and queues
+// one execDoneEvent per request to drive the core's execution loop forward.
+func (sc *simpleConsumer) execute(seqNo uint64, reqBatch *RequestBatch) {
+	for _, req := range reqBatch.GetBatch() {
+		sc.pbftNet.debugMsg("TEST: executing request\n")
+		sc.lastExecution = hash(req)
+		sc.executions++
+		sc.lastSeqNo = seqNo
+		// Goroutine avoids deadlocking against the event manager that called us.
+		go func() { sc.pe.manager.Queue() <- execDoneEvent{} }()
+	}
+}
+
+// getState returns the execution count rendered as a decimal string — a toy
+// "state snapshot" that is deterministic across replicas in lockstep.
+func (sc *simpleConsumer) getState() []byte {
+	return []byte(fmt.Sprintf("%d", sc.executions))
+}
+
+// getLastSeqNo returns the seqNo of the most recent execution, or an error
+// when nothing has executed yet.
+func (sc *simpleConsumer) getLastSeqNo() (uint64, error) {
+	if sc.executions < 1 {
+		return 0, fmt.Errorf("no execution yet")
+	}
+	return sc.lastSeqNo, nil
+}
+
+// makePBFTNetwork builds an N-replica test network. Each endpoint gets its own
+// event manager, simpleConsumer, and pbftCore; f is derived as (N-1)/3. A nil
+// config falls back to the default loaded configuration.
+func makePBFTNetwork(N int, config *viper.Viper) *pbftNetwork {
+	if config == nil {
+		config = loadConfig()
+	}
+
+	config.Set("general.N", N)
+	config.Set("general.f", (N-1)/3)
+	endpointFunc := func(id uint64, net *testnet) endpoint {
+		tep := makeTestEndpoint(id, net)
+		pe := &pbftEndpoint{
+			testEndpoint: tep,
+			manager:      events.NewManagerImpl(),
+		}
+
+		pe.sc = &simpleConsumer{
+			pe: pe,
+		}
+
+		pe.pbft = newPbftCore(id, config, pe.sc, events.NewTimerFactoryImpl(pe.manager))
+		pe.manager.SetReceiver(pe.pbft)
+
+		pe.manager.Start()
+
+		return pe
+
+	}
+
+	pn := &pbftNetwork{testnet: makeTestnet(N, endpointFunc)}
+	pn.pbftEndpoints = make([]*pbftEndpoint, len(pn.endpoints))
+	// The consumers' back-pointer to the network can only be wired up after
+	// the network object itself exists.
+	for i, ep := range pn.endpoints {
+		pn.pbftEndpoints[i] = ep.(*pbftEndpoint)
+		pn.pbftEndpoints[i].sc.pbftNet = pn
+	}
+	return pn
+}
diff --git a/consensus/pbft/pbft-core_test.go b/consensus/pbft/pbft-core_test.go
new file mode 100644
index 00000000000..0517adb92e1
--- /dev/null
+++ b/consensus/pbft/pbft-core_test.go
@@ -0,0 +1,1604 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "encoding/base64"
+ "fmt"
+ "os"
+ "reflect"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/op/go-logging"
+
+ "github.com/hyperledger/fabric/consensus/util/events"
+)
+
+// init raises logging to DEBUG for all modules so test failures carry full traces.
+func init() {
+	logging.SetLevel(logging.DEBUG, "")
+}
+
+// TestEnvOverride verifies that a CORE_PBFT_* environment variable overrides
+// the corresponding config-file value.
+func TestEnvOverride(t *testing.T) {
+	config := loadConfig()
+
+	key := "general.mode"               // for a key that exists
+	envName := "CORE_PBFT_GENERAL_MODE" // env override name
+	overrideValue := "overide_test"     // value to override default value with
+
+	// test key — use the key variable (previously the literal was duplicated
+	// in three call sites while the failure messages interpolated key,
+	// allowing the two to drift apart)
+	if ok := config.IsSet(key); !ok {
+		t.Fatalf("Cannot test env override because \"%s\" does not seem to be set", key)
+	}
+
+	os.Setenv(envName, overrideValue)
+	// The override config value will cause other calls to fail unless unset.
+	defer func() {
+		os.Unsetenv(envName)
+	}()
+
+	if ok := config.IsSet(key); !ok {
+		t.Fatalf("Env override in place, and key \"%s\" is not set", key)
+	}
+
+	// read key
+	configVal := config.GetString(key)
+	if configVal != overrideValue {
+		t.Fatalf("Env override in place, expected key \"%s\" to be \"%s\" but instead got \"%s\"", key, overrideValue, configVal)
+	}
+
+}
+
+// TestMaliciousPrePrepare sends a pre-prepare whose BatchDigest does not match
+// its RequestBatch; the replica must drop it silently — any broadcast would
+// mean the malicious message was processed, and fails the test.
+func TestMaliciousPrePrepare(t *testing.T) {
+	mock := &omniProto{
+		broadcastImpl: func(msgPayload []byte) {
+			t.Fatalf("Expected to ignore malicious pre-prepare")
+		},
+	}
+	instance := newPbftCore(1, loadConfig(), mock, &inertTimerFactory{})
+	defer instance.close()
+	instance.replicaCount = 5
+
+	// Digest computed over batch(1,1) but the carried batch is batch(1,2).
+	pbftMsg := &Message_PrePrepare{&PrePrepare{
+		View:           0,
+		SequenceNumber: 1,
+		BatchDigest:    hash(createPbftReqBatch(1, 1)),
+		RequestBatch:   createPbftReqBatch(1, 2),
+		ReplicaId:      0,
+	}}
+	events.SendEvent(instance, pbftMsg)
+}
+
+// TestWrongReplicaID delivers a pre-prepare claiming ReplicaId 1 from sender 2;
+// recvMsg must reject the mismatch with a "Sender ID" error and produce no
+// follow-up event.
+func TestWrongReplicaID(t *testing.T) {
+	mock := &omniProto{}
+	instance := newPbftCore(0, loadConfig(), mock, &inertTimerFactory{})
+	defer instance.close()
+
+	reqBatch := createPbftReqBatch(1, 1)
+	pbftMsg := &Message{Payload: &Message_PrePrepare{PrePrepare: &PrePrepare{
+		View:           0,
+		SequenceNumber: 1,
+		BatchDigest:    hash(reqBatch),
+		RequestBatch:   reqBatch,
+		ReplicaId:      1,
+	}}}
+	// Sender (2) disagrees with the embedded ReplicaId (1).
+	next, err := instance.recvMsg(pbftMsg, 2)
+
+	if next != nil || err == nil {
+		t.Fatalf("Shouldn't have processed message with incorrect replica ID")
+	}
+	if err != nil {
+		rightError := strings.HasPrefix(err.Error(), "Sender ID")
+		if !rightError {
+			t.Fatalf("Should have returned error about incorrect replica ID on the incoming message")
+		}
+	}
+}
+
+// TestIncompletePayload feeds the replica an empty Message and an empty
+// pre-prepare; both must be rejected without triggering any broadcast.
+func TestIncompletePayload(t *testing.T) {
+	mock := &omniProto{}
+	instance := newPbftCore(1, loadConfig(), mock, &inertTimerFactory{})
+	defer instance.close()
+	instance.replicaCount = 5
+
+	broadcaster := uint64(generateBroadcaster(instance.replicaCount))
+
+	// checkMsg arms the broadcast hook to fail the test, then delivers msg.
+	checkMsg := func(msg *Message, errMsg string, args ...interface{}) {
+		mock.broadcastImpl = func(msgPayload []byte) {
+			t.Errorf(errMsg, args...)
+		}
+		events.SendEvent(instance, pbftMessageEvent{msg: msg, sender: broadcaster})
+	}
+
+	checkMsg(&Message{}, "Expected to reject empty message")
+	checkMsg(&Message{Payload: &Message_PrePrepare{PrePrepare: &PrePrepare{ReplicaId: broadcaster}}}, "Expected to reject empty pre-prepare")
+}
+
+// TestNetwork runs one request batch through a full 7-validator network and
+// checks every replica executed exactly that one batch.
+func TestNetwork(t *testing.T) {
+	validatorCount := 7
+	net := makePBFTNetwork(validatorCount, nil)
+	// Added: every other network test defers net.stop(); without it the
+	// endpoints' event-manager goroutines leak past this test.
+	defer net.stop()
+
+	reqBatch := createPbftReqBatch(1, uint64(generateBroadcaster(validatorCount)))
+	net.pbftEndpoints[0].manager.Queue() <- reqBatch
+
+	err := net.process()
+	if err != nil {
+		t.Fatalf("Processing failed: %s", err)
+	}
+
+	for _, pep := range net.pbftEndpoints {
+		if pep.sc.executions <= 0 {
+			t.Errorf("Instance %d did not execute transaction", pep.id)
+			continue
+		}
+		if pep.sc.executions != 1 {
+			t.Errorf("Instance %d executed more than one transaction", pep.id)
+			continue
+		}
+		if !reflect.DeepEqual(pep.sc.lastExecution, hash(reqBatch.GetBatch()[0])) {
+			t.Errorf("Instance %d executed wrong transaction, %x should be %x",
+				pep.id, pep.sc.lastExecution, hash(reqBatch.GetBatch()[0]))
+		}
+	}
+}
+
+// checkpointConsumer embeds simpleConsumer with a WaitGroup intended to gate
+// executions in checkpoint tests.
+type checkpointConsumer struct {
+	simpleConsumer
+	execWait *sync.WaitGroup
+}
+
+// NOTE(review): this signature (seqNo, tx []byte) differs from
+// simpleConsumer.execute(seqNo, *RequestBatch), so it does NOT shadow the
+// embedded method — calls through the consumer interface still hit
+// simpleConsumer.execute. Confirm whether this no-op override is intended.
+func (cc *checkpointConsumer) execute(seqNo uint64, tx []byte) {
+}
+
+// TestCheckpoint drives a 4-validator network with K=2, logmultiplier=2 and
+// verifies that checkpoints are taken at interval boundaries, the low
+// watermark advances, and all 7 batches eventually execute everywhere.
+// NOTE(review): execWait/finishWait are Added/Waited here but nothing in the
+// harness ever blocks on or signals them (checkpointConsumer.execute does not
+// actually override simpleConsumer.execute), so the "block executes" phases
+// appear to be vestigial — confirm against the harness.
+func TestCheckpoint(t *testing.T) {
+	execWait := &sync.WaitGroup{}
+	finishWait := &sync.WaitGroup{}
+
+	validatorCount := 4
+	config := loadConfig()
+	config.Set("general.K", 2)
+	config.Set("general.logmultiplier", 2)
+	net := makePBFTNetwork(validatorCount, config)
+	defer net.stop()
+
+	execReqBatch := func(tag int64) {
+		net.pbftEndpoints[0].manager.Queue() <- createPbftReqBatch(tag, uint64(generateBroadcaster(validatorCount)))
+		net.process()
+	}
+
+	// execWait is 0, and execute will proceed
+	execReqBatch(1)
+	execReqBatch(2)
+	finishWait.Wait()
+	net.process()
+
+	for _, pep := range net.pbftEndpoints {
+		if len(pep.pbft.chkpts) != 1 {
+			t.Errorf("Expected 1 checkpoint, found %d", len(pep.pbft.chkpts))
+			continue
+		}
+
+		if _, ok := pep.pbft.chkpts[2]; !ok {
+			t.Errorf("Expected checkpoint for seqNo 2")
+			continue
+		}
+
+		if pep.pbft.h != 2 {
+			t.Errorf("Expected low water mark to be 2, got %d", pep.pbft.h)
+			continue
+		}
+	}
+
+	// this will block executes for now
+	execWait.Add(1)
+	execReqBatch(3)
+	execReqBatch(4)
+	execReqBatch(5)
+	execReqBatch(6)
+
+	// by now the requests should have committed, but not yet executed
+	// we also reached the high water mark by now.
+
+	execReqBatch(7)
+
+	// request 7 should not have committed, because no more free seqNo
+	// could be assigned.
+
+	// unblock executes.
+	execWait.Add(-1)
+
+	net.process()
+	finishWait.Wait() // Decoupling the execution thread makes this nastiness necessary
+	net.process()
+
+	// by now request 7 should have been confirmed and executed
+
+	for _, pep := range net.pbftEndpoints {
+		expectedExecutions := uint64(7)
+		if pep.sc.executions != expectedExecutions {
+			t.Errorf("Should have executed %d, got %d instead for replica %d", expectedExecutions, pep.sc.executions, pep.id)
+		}
+	}
+}
+
+// TestLostPrePrepare withholds the primary's pre-prepare from the last
+// replica; replicas 1..2 that did receive it (plus the primary) must still
+// execute, while the starved replica 3 must not.
+func TestLostPrePrepare(t *testing.T) {
+	validatorCount := 4
+	net := makePBFTNetwork(validatorCount, nil)
+	defer net.stop()
+
+	net.pbftEndpoints[0].manager.Queue() <- createPbftReqBatch(1, uint64(generateBroadcaster(validatorCount)))
+
+	// clear all messages sent by primary
+	msg := <-net.msgs
+	prePrep := &Message{}
+	err := proto.Unmarshal(msg.msg, prePrep)
+	if err != nil {
+		t.Fatalf("Error unmarshaling message")
+	}
+	net.clearMessages()
+
+	// deliver pre-prepare to subset of replicas
+	for _, pep := range net.pbftEndpoints[1 : len(net.pbftEndpoints)-1] {
+		pep.manager.Queue() <- prePrep.GetPrePrepare()
+	}
+
+	err = net.process()
+	if err != nil {
+		t.Fatalf("Processing failed: %s", err)
+	}
+
+	for _, pep := range net.pbftEndpoints {
+		if pep.id != 3 && pep.sc.executions != 1 {
+			t.Errorf("Expected execution on replica %d", pep.id)
+			continue
+		}
+		if pep.id == 3 && pep.sc.executions > 0 {
+			t.Errorf("Expected no execution")
+			continue
+		}
+	}
+}
+
+// TestInconsistentPrePrepare injects three conflicting pre-prepares (same
+// view/seqNo, different batches) to different replicas. The protocol may
+// resolve to any of them, so each replica's execution count must land in
+// [1,3] without divergence causing zero or excess executions.
+func TestInconsistentPrePrepare(t *testing.T) {
+	validatorCount := 4
+	net := makePBFTNetwork(validatorCount, nil)
+	defer net.stop()
+
+	// makePP fabricates a pre-prepare for view 0 / seqNo 1 carrying a batch
+	// distinguished only by tag.
+	makePP := func(tag int64) *PrePrepare {
+		reqBatch := createPbftReqBatch(tag, uint64(generateBroadcaster(validatorCount)))
+		preprep := &PrePrepare{
+			View:           0,
+			SequenceNumber: 1,
+			BatchDigest:    hash(reqBatch),
+			RequestBatch:   reqBatch,
+			ReplicaId:      0,
+		}
+		return preprep
+	}
+
+	net.pbftEndpoints[0].manager.Queue() <- makePP(1).GetRequestBatch()
+
+	// clear all messages sent by primary
+	net.clearMessages()
+
+	// replace with fake messages
+	net.pbftEndpoints[1].manager.Queue() <- makePP(1)
+	net.pbftEndpoints[2].manager.Queue() <- makePP(2)
+	net.pbftEndpoints[3].manager.Queue() <- makePP(3)
+
+	net.process()
+
+	for n, pep := range net.pbftEndpoints {
+		if pep.sc.executions < 1 || pep.sc.executions > 3 {
+			t.Errorf("Replica %d expected [1,3] executions, got %d", n, pep.sc.executions)
+			continue
+		}
+	}
+}
+
+// This test is designed to detect a conflation of S and S' from the paper in the view change:
+// with three view-change messages all attesting a checkpoint at seqNo 10, processing the
+// new view must move the low watermark to 10 without triggering state transfer.
+func TestViewChangeWatermarksMovement(t *testing.T) {
+	instance := newPbftCore(0, loadConfig(), &omniProto{
+		viewChangeImpl: func(v uint64) {},
+		skipToImpl: func(s uint64, id []byte, replicas []uint64) {
+			t.Fatalf("Should not have attempted to initiate state transfer")
+		},
+		broadcastImpl: func(b []byte) {},
+	}, &inertTimerFactory{})
+	// Simulate a replica mid view change into view 1 that has executed to 10.
+	instance.activeView = false
+	instance.view = 1
+	instance.lastExec = 10
+
+	vset := make([]*ViewChange, 3)
+
+	// Replica 0 sent checkpoints for 10
+	vset[0] = &ViewChange{
+		H: 5,
+		Cset: []*ViewChange_C{
+			{
+				SequenceNumber: 10,
+				Id:             "ten",
+			},
+		},
+	}
+
+	// Replica 1 sent checkpoints for 10
+	vset[1] = &ViewChange{
+		H: 5,
+		Cset: []*ViewChange_C{
+			{
+				SequenceNumber: 10,
+				Id:             "ten",
+			},
+		},
+	}
+
+	// Replica 2 sent checkpoints for 10
+	vset[2] = &ViewChange{
+		H: 5,
+		Cset: []*ViewChange_C{
+			{
+				SequenceNumber: 10,
+				Id:             "ten",
+			},
+		},
+	}
+
+	xset := make(map[uint64]string)
+	xset[11] = ""
+
+	instance.newViewStore[1] = &NewView{
+		View:      1,
+		Vset:      vset,
+		Xset:      xset,
+		ReplicaId: 1,
+	}
+
+	if _, ok := instance.processNewView().(viewChangedEvent); !ok {
+		t.Fatalf("Failed to successfully process new view")
+	}
+
+	expected := uint64(10)
+	if instance.h != expected {
+		t.Fatalf("Expected to move high watermark to %d, but picked %d", expected, instance.h)
+	}
+}
+
+// This test is designed to detect a conflation of S and S' from the paper in the view change
+// With 2f+1 = 3 view-change messages, the initial checkpoint must be the highest
+// sequence number reported by a weak certificate (f+1 = 2 replicas): here 10, not 15.
+func TestViewChangeCheckpointSelection(t *testing.T) {
+	instance := &pbftCore{
+		f: 1,
+		N: 4,
+		id: 0,
+	}
+
+	vset := make([]*ViewChange, 3)
+
+	// Replica 0 sent a checkpoint for 10 (low watermark 5)
+	vset[0] = &ViewChange{
+		H: 5,
+		Cset: []*ViewChange_C{
+			{
+				SequenceNumber: 10,
+				Id: "ten",
+			},
+		},
+	}
+
+	// Replica 1 sent a checkpoint for 10 (low watermark 5)
+	vset[1] = &ViewChange{
+		H: 5,
+		Cset: []*ViewChange_C{
+			{
+				SequenceNumber: 10,
+				Id: "ten",
+			},
+		},
+	}
+
+	// Replica 2 sent a checkpoint for 15 (low watermark 10); only one replica reports 15,
+	// so it cannot be selected
+	vset[2] = &ViewChange{
+		H: 10,
+		Cset: []*ViewChange_C{
+			{
+				SequenceNumber: 15,
+				Id: "fifteen",
+			},
+		},
+	}
+
+	checkpoint, ok, _ := instance.selectInitialCheckpoint(vset)
+
+	if !ok {
+		t.Fatalf("Failed to pick correct a checkpoint for view change")
+	}
+
+	expected := uint64(10)
+	if checkpoint.SequenceNumber != expected {
+		t.Fatalf("Expected to pick checkpoint %d, but picked %d", expected, checkpoint.SequenceNumber)
+	}
+}
+
+// TestViewChange executes three request batches, then has replicas 2 and 3 (f+1) start
+// a view change, and verifies that the remaining replicas follow, that the new initial
+// checkpoint is 2, and that sequence numbers are reassigned correctly.
+func TestViewChange(t *testing.T) {
+	validatorCount := 4
+	config := loadConfig()
+	config.Set("general.K", 2)
+	config.Set("general.logmultiplier", 2)
+	net := makePBFTNetwork(validatorCount, config)
+	defer net.stop()
+
+	// Submit one request batch to the primary and run the network to completion
+	execReqBatch := func(tag int64) {
+		net.pbftEndpoints[0].manager.Queue() <- createPbftReqBatch(tag, uint64(generateBroadcaster(validatorCount)))
+		net.process()
+	}
+
+	execReqBatch(1)
+	execReqBatch(2)
+	execReqBatch(3)
+
+	// f+1 = 2 replicas initiate the view change; the rest must join
+	for i := 2; i < len(net.pbftEndpoints); i++ {
+		net.pbftEndpoints[i].pbft.sendViewChange()
+	}
+
+	err := net.process()
+	if err != nil {
+		t.Fatalf("Processing failed: %s", err)
+	}
+
+	if net.pbftEndpoints[1].pbft.view != 1 || net.pbftEndpoints[0].pbft.view != 1 {
+		t.Fatalf("Replicas did not follow f+1 crowd to trigger view-change")
+	}
+
+	// With K=2 and 3 executions, the last stable checkpoint is at seqNo 2
+	cp, ok, _ := net.pbftEndpoints[1].pbft.selectInitialCheckpoint(net.pbftEndpoints[1].pbft.getViewChanges())
+	if !ok || cp.SequenceNumber != 2 {
+		t.Fatalf("Wrong new initial checkpoint: %+v",
+			net.pbftEndpoints[1].pbft.viewChangeStore)
+	}
+
+	// Sequence 3 must be reassigned; 4 and 5 must be empty (null requests)
+	msgList := net.pbftEndpoints[1].pbft.assignSequenceNumbers(net.pbftEndpoints[1].pbft.getViewChanges(), cp.SequenceNumber)
+	if msgList[4] != "" || msgList[5] != "" || msgList[3] == "" {
+		t.Fatalf("Wrong message list: %+v", msgList)
+	}
+}
+
+// TestInconsistentDataViewChange replaces the primary's pre-prepare with conflicting
+// fake pre-prepares (different batches for the same view/seqNo) on replicas 1-3 and
+// verifies the network still eventually executes a request on every replica.
+func TestInconsistentDataViewChange(t *testing.T) {
+	validatorCount := 4
+	net := makePBFTNetwork(validatorCount, nil)
+	defer net.stop()
+
+	// Build a pre-prepare for view 0, seqNo 1 whose batch is derived from tag
+	makePP := func(tag int64) *PrePrepare {
+		reqBatch := createPbftReqBatch(tag, uint64(generateBroadcaster(validatorCount)))
+		preprep := &PrePrepare{
+			View: 0,
+			SequenceNumber: 1,
+			BatchDigest: hash(reqBatch),
+			RequestBatch: reqBatch,
+			ReplicaId: 0,
+		}
+		return preprep
+	}
+
+	net.pbftEndpoints[0].manager.Queue() <- makePP(0).GetRequestBatch()
+
+	// clear all messages sent by primary
+	net.clearMessages()
+
+	// replace with fake messages: 1 and 2 see one batch, 3 sees another
+	net.pbftEndpoints[1].manager.Queue() <- makePP(1)
+	net.pbftEndpoints[2].manager.Queue() <- makePP(1)
+	net.pbftEndpoints[3].manager.Queue() <- makePP(0)
+
+	err := net.process()
+	if err != nil {
+		t.Fatalf("Processing failed: %s", err)
+	}
+
+	for _, pep := range net.pbftEndpoints {
+		if pep.sc.executions < 1 {
+			t.Errorf("Expected execution")
+			continue
+		}
+	}
+}
+
+// TestViewChangeWithStateTransfer advances replicas 0-2 past a checkpoint while
+// replica 3 receives nothing, then forces a view change; replica 3 must catch up
+// (via state transfer) so that a subsequent request executes everywhere.
+func TestViewChangeWithStateTransfer(t *testing.T) {
+	validatorCount := 4
+	net := makePBFTNetwork(validatorCount, nil)
+	defer net.stop()
+
+	var err error
+
+	for _, pep := range net.pbftEndpoints {
+		pep.pbft.K = 2
+		pep.pbft.L = 6
+		pep.pbft.requestTimeout = 500 * time.Millisecond
+	}
+
+	broadcaster := uint64(generateBroadcaster(validatorCount))
+
+	// Build a pre-prepare whose sequence number equals its tag
+	makePP := func(tag int64) *PrePrepare {
+		reqBatch := createPbftReqBatch(tag, broadcaster)
+		preprep := &PrePrepare{
+			View: 0,
+			SequenceNumber: uint64(tag),
+			BatchDigest: hash(reqBatch),
+			RequestBatch: reqBatch,
+			ReplicaId: 0,
+		}
+		return preprep
+	}
+
+	// Have primary advance the sequence number past a checkpoint for replicas 0,1,2
+	// (replica 3 never sees these pre-prepares and falls behind)
+	for i := int64(1); i <= 3; i++ {
+		net.pbftEndpoints[0].manager.Queue() <- makePP(i).GetRequestBatch()
+
+		// clear all messages sent by primary
+		net.clearMessages()
+
+		net.pbftEndpoints[0].manager.Queue() <- makePP(i)
+		net.pbftEndpoints[1].manager.Queue() <- makePP(i)
+		net.pbftEndpoints[2].manager.Queue() <- makePP(i)
+
+		err = net.process()
+		if err != nil {
+			t.Fatalf("Processing failed: %s", err)
+		}
+
+	}
+
+	fmt.Println("Done with stage 1")
+
+	// Replicas 1 and 2 start a view change; replica 3 must sync up during it
+	net.pbftEndpoints[1].pbft.sendViewChange()
+	net.pbftEndpoints[2].pbft.sendViewChange()
+	err = net.process()
+	if err != nil {
+		t.Fatalf("Processing failed: %s", err)
+	}
+
+	// NOTE(review): this print duplicates the final one — it presumably should say "stage 2"
+	fmt.Println("Done with stage 3")
+
+	net.pbftEndpoints[1].manager.Queue() <- makePP(5).GetRequestBatch()
+	err = net.process()
+	if err != nil {
+		t.Fatalf("Processing failed: %s", err)
+	}
+
+	for _, pep := range net.pbftEndpoints {
+		if pep.sc.executions != 4 {
+			t.Errorf("Replica %d expected execution through seqNo 5 with one null exec, got %d executions", pep.pbft.id, pep.sc.executions)
+			continue
+		}
+	}
+	fmt.Println("Done with stage 3")
+}
+
+// TestNewViewTimeout verifies the view-change timeout path: when the new primary is
+// unreachable, replicas keep advancing views until a reachable primary is elected,
+// at which point the pending request finally executes.
+func TestNewViewTimeout(t *testing.T) {
+	// Base delay unit; sleeps below are 5 * 800ms = 4s
+	millisUntilTimeout := time.Duration(800)
+	validatorCount := 4
+	config := loadConfig()
+	config.Set("general.timeout.request", "400ms")
+	config.Set("general.timeout.viewchange", "800ms")
+	net := makePBFTNetwork(validatorCount, config)
+	defer net.stop()
+
+	// When enabled, drop all broadcasts originating from replica 1 (the would-be new primary)
+	replica1Disabled := false
+	net.filterFn = func(src int, dst int, msg []byte) []byte {
+		if dst == -1 && src == 1 && replica1Disabled {
+			return nil
+		}
+		return msg
+	}
+
+	go net.processContinually()
+
+	reqBatch := createPbftReqBatch(1, uint64(generateBroadcaster(validatorCount)))
+
+	// This will eventually trigger 1's request timeout
+	// We check that one single timed out replica will not keep trying to change views by itself
+	net.pbftEndpoints[1].manager.Queue() <- reqBatch
+	fmt.Println("Debug: Sleeping 1")
+	time.Sleep(5 * millisUntilTimeout * time.Millisecond)
+	fmt.Println("Debug: Waking 1")
+
+	// This will eventually trigger 3's request timeout, which will lead to a view change to 1.
+	// However, we disable 1, which will disable the new-view going through.
+	// This checks that replicas will automatically move to view 2 when the view change times out.
+	// However, 2 does not know about the missing request, and therefore the request will not be
+	// pre-prepared and finally executed.
+	replica1Disabled = true
+	net.pbftEndpoints[3].manager.Queue() <- reqBatch
+	fmt.Println("Debug: Sleeping 2")
+	time.Sleep(5 * millisUntilTimeout * time.Millisecond)
+	fmt.Println("Debug: Waking 2")
+
+	// So far, we are in view 2, and replica 1 and 3 (who got the request) in view change to view 3.
+	// Submitting the request to 0 will eventually trigger its view-change timeout, which will make
+	// all replicas move to view 3 and finally process the request.
+	net.pbftEndpoints[0].manager.Queue() <- reqBatch
+	fmt.Println("Debug: Sleeping 3")
+	time.Sleep(5 * millisUntilTimeout * time.Millisecond)
+	fmt.Println("Debug: Waking 3")
+
+	for i, pep := range net.pbftEndpoints {
+		if pep.pbft.view < 3 {
+			t.Errorf("Should have reached view 3, got %d instead for replica %d", pep.pbft.view, i)
+		}
+		executionsExpected := uint64(1)
+		if pep.sc.executions != executionsExpected {
+			t.Errorf("Should have executed %d, got %d instead for replica %d", executionsExpected, pep.sc.executions, i)
+		}
+	}
+}
+
+// TestViewChangeUpdateSeqNo primes the replicas as if 99 requests already executed,
+// executes one more, forces a view change, and checks the new primary continues
+// assigning sequence numbers from where the old view left off.
+// NOTE(review): unlike the sibling tests there is no local `config := loadConfig()`
+// here — this relies on a `config` defined elsewhere in the package; confirm intended.
+func TestViewChangeUpdateSeqNo(t *testing.T) {
+	millisUntilTimeout := 400 * time.Millisecond
+	validatorCount := 4
+	config.Set("general.timeout.request", "400ms")
+	config.Set("general.timeout.viewchange", "400ms")
+	net := makePBFTNetwork(validatorCount, config)
+	for _, pe := range net.pbftEndpoints {
+		pe.pbft.lastExec = 99
+		pe.pbft.h = 99 / pe.pbft.K * pe.pbft.K
+	}
+	net.pbftEndpoints[0].pbft.seqNo = 99
+
+	go net.processContinually()
+
+	broadcaster := uint64(generateBroadcaster(validatorCount))
+
+	reqBatch := createPbftReqBatch(1, broadcaster)
+	net.pbftEndpoints[0].manager.Queue() <- reqBatch
+	time.Sleep(5 * millisUntilTimeout)
+	// Now we all have executed seqNo 100. After triggering a
+	// view change, the new primary should pick up right after
+	// that.
+
+	net.pbftEndpoints[0].pbft.sendViewChange()
+	net.pbftEndpoints[1].pbft.sendViewChange()
+	time.Sleep(5 * millisUntilTimeout)
+
+	reqBatch = createPbftReqBatch(2, broadcaster)
+	net.pbftEndpoints[1].manager.Queue() <- reqBatch
+	time.Sleep(5 * millisUntilTimeout)
+
+	net.stop()
+	for i, pep := range net.pbftEndpoints {
+		// NOTE(review): the message says "view 3" but the condition only requires view >= 1
+		if pep.pbft.view < 1 {
+			t.Errorf("Should have reached view 3, got %d instead for replica %d", pep.pbft.view, i)
+		}
+		executionsExpected := uint64(2)
+		if pep.sc.executions != executionsExpected {
+			t.Errorf("Should have executed %d, got %d instead for replica %d", executionsExpected, pep.sc.executions, i)
+		}
+	}
+}
+
+// Test for issue #1119
+// With L=4 and K=2, only half the log (L/2 = 2) may hold outstanding pre-prepares,
+// so submitting 4 batches must result in exactly 2 broadcasts.
+func TestSendQueueThrottling(t *testing.T) {
+	prePreparesSent := 0
+
+	mock := &omniProto{}
+	instance := newPbftCore(0, loadConfig(), mock, &inertTimerFactory{})
+	instance.f = 1
+	instance.K = 2
+	instance.L = 4
+	// Count every broadcast the core attempts
+	instance.consumer = &omniProto{
+		broadcastImpl: func(p []byte) {
+			prePreparesSent++
+		},
+	}
+	defer instance.close()
+
+	for j := 0; j < 4; j++ {
+		events.SendEvent(instance, createPbftReqBatch(int64(j), 0)) // replica ID for req doesn't matter
+	}
+
+	expected := 2
+	if prePreparesSent != expected {
+		t.Fatalf("Expected to send only %d pre-prepares, but got %d messages", expected, prePreparesSent)
+	}
+}
+
+// From issue #687
+func TestWitnessCheckpointOutOfBounds(t *testing.T) {
+	mock := &omniProto{}
+	instance := newPbftCore(1, loadConfig(), mock, &inertTimerFactory{})
+	instance.f = 1
+	instance.K = 2
+	instance.L = 4
+	defer instance.close()
+
+	// First checkpoint arrives while still within the watermark window
+	events.SendEvent(instance, &Checkpoint{
+		SequenceNumber: 6,
+		ReplicaId: 0,
+	})
+
+	instance.moveWatermarks(6)
+
+	// This causes the list of high checkpoints to grow to be f+1
+	// even though there are not f+1 checkpoints witnessed outside our range
+	// historically, this caused an index out of bounds error
+	events.SendEvent(instance, &Checkpoint{
+		SequenceNumber: 10,
+		ReplicaId: 3,
+	})
+}
+
+// From issue #687
+func TestWitnessFallBehindMissingPrePrepare(t *testing.T) {
+	mock := &omniProto{}
+	instance := newPbftCore(1, loadConfig(), mock, &inertTimerFactory{})
+	instance.f = 1
+	instance.K = 2
+	instance.L = 4
+	defer instance.close()
+
+	// A commit with no matching pre-prepare creates a cert entry with a nil prePrepare
+	events.SendEvent(instance, &Commit{
+		SequenceNumber: 2,
+		ReplicaId: 0,
+	})
+
+	// Historically, the lack of prePrepare associated with the commit would cause
+	// a nil pointer reference
+	instance.moveWatermarks(6)
+}
+
+// TestFallBehind excludes replica 3 from the first request, drives the network past
+// the watermark window, and checks that replica 3 detects it has fallen behind and
+// performs state transfer once a weak checkpoint certificate is witnessed.
+func TestFallBehind(t *testing.T) {
+	validatorCount := 4
+	config := loadConfig()
+	config.Set("general.K", 2)
+	config.Set("general.logmultiplier", 2)
+	net := makePBFTNetwork(validatorCount, config)
+	defer net.stop()
+
+	execReqBatch := func(tag int64, skipThree bool) {
+		net.pbftEndpoints[0].manager.Queue() <- createPbftReqBatch(tag, uint64(generateBroadcaster(validatorCount)))
+
+		if skipThree {
+			// Send the request for consensus to everyone but replica 3
+			net.filterFn = func(src, replica int, msg []byte) []byte {
+				if src != -1 && replica == 3 {
+					return nil
+				}
+				return msg
+			}
+		} else {
+			// Send the request for consensus to everyone
+			net.filterFn = nil
+		}
+		err := net.process()
+		if err != nil {
+			t.Fatalf("Processing failed: %s", err)
+		}
+	}
+
+	pep := net.pbftEndpoints[3]
+	pbft := pep.pbft
+
+	// Send enough requests to get to a checkpoint quorum certificate with sequence number L+K
+	execReqBatch(1, true)
+	for i := int64(2); uint64(i) <= pbft.L+pbft.K; i++ {
+		execReqBatch(i, false)
+	}
+
+	if !pbft.skipInProgress {
+		t.Fatalf("Replica did not detect that it has fallen behind.")
+	}
+
+	if len(pbft.chkpts) != 0 {
+		t.Fatalf("Expected no checkpoints, found %d", len(pbft.chkpts))
+	}
+
+	if pbft.h != pbft.L+pbft.K {
+		t.Fatalf("Expected low water mark to be %d, got %d", pbft.L+pbft.K, pbft.h)
+	}
+
+	// Send enough requests to get to a weak checkpoint certificate with sequence number L+K*2
+	for i := int64(pbft.L + pbft.K + 1); uint64(i) <= pbft.L+pbft.K*2; i++ {
+		execReqBatch(i, false)
+	}
+
+	if !pep.sc.skipOccurred {
+		t.Fatalf("Request failed to kick off state transfer")
+	}
+
+	execReqBatch(int64(pbft.L+pbft.K*2+1), false)
+
+	if pep.sc.executions < pbft.L+pbft.K*2 {
+		t.Fatalf("Replica did not perform state transfer")
+	}
+
+	// XXX currently disabled, need to resync view# during/after state transfer
+	// if pep.sc.executions != pbft.L+pbft.K*2+1 {
+	//	t.Fatalf("Replica did not begin participating normally after state transfer completed")
+	//}
+}
+
+// TestPbftF0 runs a single-replica network (f=0) and verifies the lone replica
+// executes the submitted request exactly once with the correct payload.
+func TestPbftF0(t *testing.T) {
+	net := makePBFTNetwork(1, nil)
+	defer net.stop()
+
+	reqBatch := createPbftReqBatch(1, 0)
+	net.pbftEndpoints[0].manager.Queue() <- reqBatch
+
+	err := net.process()
+	if err != nil {
+		t.Fatalf("Processing failed: %s", err)
+	}
+
+	for _, pep := range net.pbftEndpoints {
+		if pep.sc.executions < 1 {
+			t.Errorf("Instance %d did not execute transaction", pep.id)
+			continue
+		}
+		if pep.sc.executions >= 2 {
+			t.Errorf("Instance %d executed more than one transaction", pep.id)
+			continue
+		}
+		// The executed transaction must be the one we submitted
+		if !reflect.DeepEqual(pep.sc.lastExecution, hash(reqBatch.GetBatch()[0])) {
+			t.Errorf("Instance %d executed wrong transaction, %x should be %x",
+				pep.id, pep.sc.lastExecution, hash(reqBatch.GetBatch()[0]))
+		}
+	}
+}
+
+// Make sure the request timer doesn't inflate the view timeout by firing during view change
+func TestRequestTimerDuringViewChange(t *testing.T) {
+	mock := &omniProto{
+		signImpl: func(msg []byte) ([]byte, error) { return msg, nil },
+		verifyImpl: func(senderID uint64, signature []byte, message []byte) error { return nil },
+		// Any broadcast while activeView is false would be a spurious view-change message
+		broadcastImpl: func(msg []byte) {
+			t.Errorf("Should not send the view change message during a view change")
+		},
+	}
+	instance, manager := createRunningPbftWithManager(1, loadConfig(), mock)
+	defer manager.Halt()
+	instance.f = 1
+	instance.K = 2
+	instance.L = 4
+	// Very short request timeout so it would fire during the sleep below if not suppressed
+	instance.requestTimeout = time.Millisecond
+	instance.activeView = false
+	defer instance.close()
+
+	manager.Queue() <- createPbftReqBatch(1, 1) // replica ID should not correspond to the primary
+
+	time.Sleep(100 * time.Millisecond)
+}
+
+// TestReplicaCrash1 simulates the restart of replicas 0 and 1 after
+// some state has been built (one request executed). At the time of
+// the restart, replica 0 is also the primary. All three requests
+// submitted should also be executed on all replicas.
+func TestReplicaCrash1(t *testing.T) {
+	validatorCount := 4
+	config := loadConfig()
+	config.Set("general.K", 2)
+	config.Set("general.logmultiplier", 2)
+	net := makePBFTNetwork(validatorCount, config)
+	defer net.stop()
+
+	net.pbftEndpoints[0].manager.Queue() <- createPbftReqBatch(1, uint64(generateBroadcaster(validatorCount)))
+	net.process()
+
+	// "Crash" replicas 0 and 1 by replacing their pbft cores with fresh instances
+	for id := 0; id < 2; id++ {
+		pe := net.pbftEndpoints[id]
+		pe.pbft = newPbftCore(uint64(id), loadConfig(), pe.sc, events.NewTimerFactoryImpl(pe.manager))
+		pe.manager.SetReceiver(pe.pbft)
+		pe.pbft.N = 4
+		pe.pbft.f = (4 - 1) / 3
+		pe.pbft.K = 2
+		pe.pbft.L = 2 * pe.pbft.K
+	}
+
+	net.pbftEndpoints[0].manager.Queue() <- createPbftReqBatch(2, uint64(generateBroadcaster(validatorCount)))
+	net.pbftEndpoints[0].manager.Queue() <- createPbftReqBatch(3, uint64(generateBroadcaster(validatorCount)))
+	net.process()
+
+	for _, pep := range net.pbftEndpoints {
+		if pep.sc.executions != 3 {
+			t.Errorf("Expected 3 executions on replica %d, got %d", pep.id, pep.sc.executions)
+			continue
+		}
+
+		// A restart must not force a view change
+		if pep.pbft.view != 0 {
+			t.Errorf("Replica %d should still be in view 0, is %v %d", pep.id, pep.pbft.activeView, pep.pbft.view)
+		}
+	}
+}
+
+// TestReplicaCrash2 is a misnomer. It simulates a situation where
+// one replica (#3) is byzantine and does not participate at all.
+// Additionally, for view<2 and seqno=1, the network drops commit
+// messages to all but replica 1.
+func TestReplicaCrash2(t *testing.T) {
+	validatorCount := 4
+	config := loadConfig()
+	config.Set("general.timeout.request", "800ms")
+	config.Set("general.timeout.viewchange", "800ms")
+	config.Set("general.K", 2)
+	config.Set("general.logmultiplier", 2)
+	net := makePBFTNetwork(validatorCount, config)
+	defer net.stop()
+
+	filterMsg := true
+	net.filterFn = func(src int, dst int, msg []byte) []byte {
+		if dst == 3 { // 3 is byz
+			return nil
+		}
+		pm := &Message{}
+		err := proto.Unmarshal(msg, pm)
+		if err != nil {
+			t.Fatal(err)
+		}
+		// filter commits to all but 1
+		commit := pm.GetCommit()
+		if filterMsg && dst != -1 && dst != 1 && commit != nil && commit.View < 2 {
+			logger.Infof("filtering commit message from %d to %d", src, dst)
+			return nil
+		}
+		return msg
+	}
+
+	net.pbftEndpoints[0].manager.Queue() <- createPbftReqBatch(1, uint64(generateBroadcaster(validatorCount)))
+	net.process()
+
+	logger.Info("stopping filtering")
+	filterMsg = false
+	// Submit the remaining requests to whoever is primary after the induced view changes
+	primary := net.pbftEndpoints[0].pbft.primary(net.pbftEndpoints[0].pbft.view)
+	net.pbftEndpoints[primary].manager.Queue() <- createPbftReqBatch(2, uint64(generateBroadcaster(validatorCount)))
+	net.pbftEndpoints[primary].manager.Queue() <- createPbftReqBatch(3, uint64(generateBroadcaster(validatorCount)))
+	net.pbftEndpoints[primary].manager.Queue() <- createPbftReqBatch(4, uint64(generateBroadcaster(validatorCount)))
+	go net.processContinually()
+	time.Sleep(5 * time.Second)
+
+	for _, pep := range net.pbftEndpoints {
+		if pep.id != 3 && pep.sc.executions != 4 {
+			t.Errorf("Expected 4 executions on replica %d, got %d", pep.id, pep.sc.executions)
+			continue
+		}
+		if pep.id == 3 && pep.sc.executions > 0 {
+			t.Errorf("Expected no execution")
+			continue
+		}
+	}
+}
+
+// TestReplicaCrash3 simulates the restart requiring a view change
+// to a checkpoint which was restored from the persistence state
+// Replicas 0,1,2 participate up to a checkpoint, then all crash
+// Then replicas 0,1,3 start back up, and a view change must be
+// triggered to get vp3 up to speed
+func TestReplicaCrash3(t *testing.T) {
+	validatorCount := 4
+	config := loadConfig()
+	config.Set("general.K", 2)
+	config.Set("general.logmultiplier", 2)
+	net := makePBFTNetwork(validatorCount, config)
+	defer net.stop()
+
+	twoOffline := false
+	threeOffline := true
+	net.filterFn = func(src int, dst int, msg []byte) []byte {
+		if twoOffline && dst == 2 { // 2 is 'offline'
+			return nil
+		}
+		if threeOffline && dst == 3 { // 3 is 'offline'
+			return nil
+		}
+		return msg
+	}
+
+	for i := int64(1); i <= 8; i++ {
+		net.pbftEndpoints[0].manager.Queue() <- createPbftReqBatch(i, uint64(generateBroadcaster(validatorCount)))
+	}
+	net.process() // vp0,1,2 should have a stable checkpoint for seqNo 8
+
+	// Create new pbft instances to restore from persistence
+	for id := 0; id < 2; id++ {
+		pe := net.pbftEndpoints[id]
+		config := loadConfig()
+		config.Set("general.K", "2")
+		pe.pbft.close()
+		pe.pbft = newPbftCore(uint64(id), config, pe.sc, events.NewTimerFactoryImpl(pe.manager))
+		pe.manager.SetReceiver(pe.pbft)
+		pe.pbft.N = 4
+		pe.pbft.f = (4 - 1) / 3
+		pe.pbft.requestTimeout = 200 * time.Millisecond
+	}
+
+	// Swap which replica is offline: vp3 rejoins, vp2 drops out
+	threeOffline = false
+	twoOffline = true
+
+	// Because vp2 is 'offline', and vp3 is still at the genesis block, the network needs to make a view change
+
+	net.pbftEndpoints[0].manager.Queue() <- createPbftReqBatch(9, uint64(generateBroadcaster(validatorCount)))
+	net.process()
+
+	// Now vp0,1,3 should be in sync with 9 executions in view 1, and vp2 should be at 8 executions in view 0
+	for i, pep := range net.pbftEndpoints {
+
+		if i == 2 {
+			// 2 is 'offline'
+			if pep.pbft.view != 0 {
+				t.Errorf("Expected replica %d to be in view 0, got %d", pep.id, pep.pbft.view)
+			}
+			expectedExecutions := uint64(8)
+			if pep.sc.executions != expectedExecutions {
+				t.Errorf("Expected %d executions on replica %d, got %d", expectedExecutions, pep.id, pep.sc.executions)
+			}
+			continue
+		}
+
+		if pep.pbft.view != 1 {
+			t.Errorf("Expected replica %d to be in view 1, got %d", pep.id, pep.pbft.view)
+		}
+
+		expectedExecutions := uint64(9)
+		if pep.sc.executions != expectedExecutions {
+			t.Errorf("Expected %d executions on replica %d, got %d", expectedExecutions, pep.id, pep.sc.executions)
+		}
+	}
+}
+
+// TestReplicaPersistQSet verifies that a pre-prepare accepted before a crash is
+// restored from the persisted qset when a new core is created over the same store.
+func TestReplicaPersistQSet(t *testing.T) {
+	// In-memory stand-in for the persistence layer shared across the "restart"
+	persist := make(map[string][]byte)
+
+	stack := &omniProto{
+		broadcastImpl: func(msg []byte) {
+		},
+		StoreStateImpl: func(key string, value []byte) error {
+			persist[key] = value
+			return nil
+		},
+		DelStateImpl: func(key string) {
+			delete(persist, key)
+		},
+		ReadStateImpl: func(key string) ([]byte, error) {
+			if val, ok := persist[key]; ok {
+				return val, nil
+			}
+			return nil, fmt.Errorf("key not found")
+		},
+		ReadStateSetImpl: func(prefix string) (map[string][]byte, error) {
+			// Return every persisted entry whose key starts with prefix
+			r := make(map[string][]byte)
+			for k, v := range persist {
+				if len(k) >= len(prefix) && k[0:len(prefix)] == prefix {
+					r[k] = v
+				}
+			}
+			return r, nil
+		},
+	}
+	p := newPbftCore(1, loadConfig(), stack, &inertTimerFactory{})
+	reqBatch := createPbftReqBatch(1, 0)
+	events.SendEvent(p, &PrePrepare{
+		View: 0,
+		SequenceNumber: 1,
+		BatchDigest: hash(reqBatch),
+		RequestBatch: reqBatch,
+		ReplicaId: uint64(0),
+	})
+	p.close()
+
+	// "Restart" on the same persisted state; the qset must survive
+	p = newPbftCore(1, loadConfig(), stack, &inertTimerFactory{})
+	if !p.prePrepared(hash(reqBatch), 0, 1) {
+		t.Errorf("did not restore qset properly")
+	}
+}
+
+// TestReplicaPersistDelete checks that persisting a request batch writes exactly one
+// entry to the store and that deleting it removes the entry again.
+func TestReplicaPersistDelete(t *testing.T) {
+	persist := make(map[string][]byte)
+
+	stack := &omniProto{
+		StoreStateImpl: func(key string, value []byte) error {
+			persist[key] = value
+			return nil
+		},
+		DelStateImpl: func(key string) {
+			delete(persist, key)
+		},
+	}
+	p := newPbftCore(1, loadConfig(), stack, &inertTimerFactory{})
+	p.reqBatchStore["a"] = &RequestBatch{}
+	p.persistRequestBatch("a")
+	if len(persist) != 1 {
+		t.Error("expected one persisted entry")
+	}
+	p.persistDelRequestBatch("a")
+	if len(persist) != 0 {
+		t.Error("expected no persisted entry")
+	}
+}
+
+// TestNilCurrentExec ensures execDoneSync tolerates a nil currentExec (issue 1538).
+func TestNilCurrentExec(t *testing.T) {
+	p := newPbftCore(1, loadConfig(), &omniProto{}, &inertTimerFactory{})
+	p.execDoneSync() // Per issue 1538, this would cause a Nil pointer dereference
+}
+
+// TestNetworkNullRequests verifies that with the null-request timer enabled the
+// primary pads idle time with null requests: lastExec advances past the single real
+// request, only one real execution happens, and no view change occurs.
+func TestNetworkNullRequests(t *testing.T) {
+	validatorCount := 4
+	config := loadConfig()
+	config.Set("general.timeout.nullrequest", "200ms")
+	config.Set("general.timeout.request", "500ms")
+	net := makePBFTNetwork(validatorCount, config)
+	defer net.stop()
+
+	net.pbftEndpoints[0].manager.Queue() <- createPbftReqBatch(1, 0)
+
+	go net.processContinually()
+	time.Sleep(3 * time.Second)
+
+	for _, pep := range net.pbftEndpoints {
+		if pep.sc.executions != 1 {
+			t.Errorf("Instance %d executed incorrect number of transactions: %d", pep.id, pep.sc.executions)
+		}
+		// lastExec > 1 implies null requests were ordered after the real one
+		if pep.pbft.lastExec <= 1 {
+			t.Errorf("Instance %d: no null requests processed", pep.id)
+		}
+		if pep.pbft.view != 0 {
+			t.Errorf("Instance %d: expected view=0", pep.id)
+		}
+	}
+}
+
+// TestNetworkNullRequestMissing disables the null-request timer on the primary only;
+// the other replicas' timers expire, forcing a view change to view 1, after which
+// the new primary's null requests advance lastExec normally.
+func TestNetworkNullRequestMissing(t *testing.T) {
+	validatorCount := 4
+	config := loadConfig()
+	config.Set("general.timeout.nullrequest", "200ms")
+	config.Set("general.timeout.request", "500ms")
+	net := makePBFTNetwork(validatorCount, config)
+	defer net.stop()
+
+	// Primary never sends null requests
+	net.pbftEndpoints[0].pbft.nullRequestTimeout = 0
+
+	net.pbftEndpoints[0].manager.Queue() <- createPbftReqBatch(1, 0)
+
+	go net.processContinually()
+	time.Sleep(3 * time.Second) // Bumped from 2 to 3 seconds because of sporadic CI failures
+
+	for _, pep := range net.pbftEndpoints {
+		if pep.sc.executions != 1 {
+			t.Errorf("Instance %d executed incorrect number of transactions: %d", pep.id, pep.sc.executions)
+		}
+		if pep.pbft.lastExec <= 1 {
+			t.Errorf("Instance %d: no null requests processed", pep.id)
+		}
+		if pep.pbft.view != 1 {
+			t.Errorf("Instance %d: expected view=1", pep.id)
+		}
+	}
+}
+
+// TestNetworkPeriodicViewChange sets viewchangeperiod=1 (one checkpoint interval per
+// view) and verifies that after 5 executions the network has rotated through views
+// as expected while executing every request.
+func TestNetworkPeriodicViewChange(t *testing.T) {
+	validatorCount := 4
+	config := loadConfig()
+	config.Set("general.K", "2")
+	config.Set("general.logmultiplier", "2")
+	config.Set("general.timeout.request", "500ms")
+	config.Set("general.viewchangeperiod", "1")
+	net := makePBFTNetwork(validatorCount, config)
+	defer net.stop()
+
+	// Submit each request to every replica so ordering proceeds regardless of primary
+	for n := 1; n < 6; n++ {
+		for _, pe := range net.pbftEndpoints {
+			pe.manager.Queue() <- createPbftReqBatch(int64(n), 0)
+		}
+		net.process()
+	}
+
+	for _, pep := range net.pbftEndpoints {
+		if pep.sc.executions != 5 {
+			t.Errorf("Instance %d executed incorrect number of transactions: %d", pep.id, pep.sc.executions)
+		}
+		// We should be in view 2, 2 exec, VC, 2 exec, VC, exec
+		if pep.pbft.view != 2 {
+			t.Errorf("Instance %d: expected view=2", pep.id)
+		}
+	}
+}
+
+// TestNetworkPeriodicViewChangeMissing disables the periodic view change on the
+// primary only; the remaining replicas still force the rotation to view 1 and all
+// submitted requests execute everywhere.
+func TestNetworkPeriodicViewChangeMissing(t *testing.T) {
+	validatorCount := 4
+	config := loadConfig()
+	config.Set("general.K", "2")
+	config.Set("general.logmultiplier", "2")
+	config.Set("general.timeout.request", "500ms")
+	config.Set("general.viewchangeperiod", "1")
+	net := makePBFTNetwork(validatorCount, config)
+	defer net.stop()
+
+	// Primary opts out of the periodic rotation
+	net.pbftEndpoints[0].pbft.viewChangePeriod = 0
+	net.pbftEndpoints[0].pbft.viewChangeSeqNo = ^uint64(0)
+
+	for n := 1; n < 3; n++ {
+		for _, pe := range net.pbftEndpoints {
+			pe.manager.Queue() <- createPbftReqBatch(int64(n), 0)
+		}
+		net.process()
+	}
+
+	for _, pep := range net.pbftEndpoints {
+		if pep.sc.executions != 2 {
+			t.Errorf("Instance %d executed incorrect number of transactions: %d", pep.id, pep.sc.executions)
+		}
+		if pep.pbft.view != 1 {
+			t.Errorf("Instance %d: expected view=1", pep.id)
+		}
+	}
+}
+
+// TestViewChangeCannotExecuteToCheckpoint tests a replica mid-execution, which receives a view change to a checkpoint above its watermarks
+// but does _not_ have enough commit certificates to reach the checkpoint. state should transfer
+func TestViewChangeCannotExecuteToCheckpoint(t *testing.T) {
+	instance := newPbftCore(3, loadConfig(), &omniProto{
+		broadcastImpl: func(b []byte) {},
+		getStateImpl: func() []byte { return []byte("state") },
+		signImpl: func(b []byte) ([]byte, error) { return b, nil },
+		verifyImpl: func(senderID uint64, signature []byte, message []byte) error { return nil },
+		invalidateStateImpl: func() {},
+	}, &inertTimerFactory{})
+	instance.activeView = false
+	instance.view = 1
+	newViewBaseSeqNo := uint64(10)
+	nextExec := uint64(6)
+	instance.currentExec = &nextExec
+
+	// Populate commit certificates only below the new-view base, so the replica
+	// cannot execute its way up to the checkpoint
+	for i := instance.lastExec; i < newViewBaseSeqNo; i++ {
+		commit := &Commit{View: 0, SequenceNumber: i}
+		prepare := &Prepare{View: 0, SequenceNumber: i}
+		instance.certStore[msgID{v: 0, n: i}] = &msgCert{
+			digest: "", // null request
+			prePrepare: &PrePrepare{View: 0, SequenceNumber: i},
+			prepare: []*Prepare{prepare, prepare, prepare},
+			commit: []*Commit{commit, commit, commit},
+		}
+	}
+
+	vset := make([]*ViewChange, 3)
+
+	cset := []*ViewChange_C{
+		{
+			SequenceNumber: newViewBaseSeqNo,
+			Id: base64.StdEncoding.EncodeToString([]byte("Ten")),
+		},
+	}
+
+	// All three replicas sent checkpoints for newViewBaseSeqNo (10)
+	for i := 0; i < 3; i++ {
+		vset[i] = &ViewChange{
+			H: newViewBaseSeqNo,
+			Cset: cset,
+		}
+	}
+
+	xset := make(map[uint64]string)
+	xset[11] = ""
+
+	instance.lastExec = 9
+
+	instance.newViewStore[1] = &NewView{
+		View: 1,
+		Vset: vset,
+		Xset: xset,
+		ReplicaId: 1,
+	}
+
+	if _, ok := instance.processNewView().(viewChangedEvent); !ok {
+		t.Fatalf("Should have processed the new view")
+	}
+
+	if !instance.skipInProgress {
+		t.Fatalf("Should have done state transfer")
+	}
+}
+
+// TestViewChangeCanExecuteToCheckpoint tests a replica mid-execution, which receives a view change to a checkpoint above its watermarks
+// but which has enough commit certificates to reach the checkpoint. State should not transfer and executions should trigger the view change
+func TestViewChangeCanExecuteToCheckpoint(t *testing.T) {
+	instance := newPbftCore(3, loadConfig(), &omniProto{
+		broadcastImpl: func(b []byte) {},
+		getStateImpl: func() []byte { return []byte("state") },
+		signImpl: func(b []byte) ([]byte, error) { return b, nil },
+		verifyImpl: func(senderID uint64, signature []byte, message []byte) error { return nil },
+		skipToImpl: func(s uint64, id []byte, replicas []uint64) {
+			t.Fatalf("Should not have performed state transfer, should have caught up via execution")
+		},
+	}, &inertTimerFactory{})
+	instance.activeView = false
+	instance.view = 1
+	instance.lastExec = 5
+	newViewBaseSeqNo := uint64(10)
+	nextExec := uint64(6)
+	instance.currentExec = &nextExec
+
+	// Populate full commit certificates all the way up to the new-view base,
+	// so the replica can execute its way to the checkpoint
+	for i := nextExec + 1; i <= newViewBaseSeqNo; i++ {
+		commit := &Commit{View: 0, SequenceNumber: i}
+		prepare := &Prepare{View: 0, SequenceNumber: i}
+		instance.certStore[msgID{v: 0, n: i}] = &msgCert{
+			digest: "", // null request
+			prePrepare: &PrePrepare{View: 0, SequenceNumber: i},
+			prepare: []*Prepare{prepare, prepare, prepare},
+			commit: []*Commit{commit, commit, commit},
+		}
+	}
+
+	vset := make([]*ViewChange, 3)
+
+	cset := []*ViewChange_C{
+		{
+			SequenceNumber: newViewBaseSeqNo,
+			Id: base64.StdEncoding.EncodeToString([]byte("Ten")),
+		},
+	}
+
+	// All three replicas sent checkpoints for newViewBaseSeqNo (10)
+	for i := 0; i < 3; i++ {
+		vset[i] = &ViewChange{
+			H: newViewBaseSeqNo,
+			Cset: cset,
+		}
+	}
+
+	xset := make(map[uint64]string)
+	xset[11] = ""
+
+	instance.lastExec = 9
+
+	instance.newViewStore[1] = &NewView{
+		View: 1,
+		Vset: vset,
+		Xset: xset,
+		ReplicaId: 1,
+	}
+
+	// New view cannot complete yet; pending executions must finish first
+	if instance.processNewView() != nil {
+		t.Fatalf("Should not have processed the new view")
+	}
+
+	events.SendEvent(instance, execDoneEvent{})
+
+	if !instance.activeView {
+		t.Fatalf("Should have finished processing new view after executions")
+	}
+}
+
+// TestViewWithOldSeqNos processes a new view containing P/Q entries below the
+// replica's watermarks and verifies no stale certStore entries survive beneath
+// the low watermark afterwards.
+func TestViewWithOldSeqNos(t *testing.T) {
+	instance := newPbftCore(3, loadConfig(), &omniProto{
+		broadcastImpl: func(b []byte) {},
+		signImpl: func(b []byte) ([]byte, error) { return b, nil },
+		verifyImpl: func(senderID uint64, signature []byte, message []byte) error { return nil },
+	}, &inertTimerFactory{})
+	instance.activeView = false
+	instance.view = 1
+
+	vset := make([]*ViewChange, 3)
+
+	cset := []*ViewChange_C{
+		{
+			SequenceNumber: 0,
+			Id: base64.StdEncoding.EncodeToString([]byte("Zero")),
+		},
+	}
+
+	// Prepared/pre-prepared entries at seq 9 and 2, both below the eventual watermark
+	qset := []*ViewChange_PQ{
+		{
+			SequenceNumber: 9,
+			BatchDigest: "nine",
+			View: 0,
+		},
+		{
+			SequenceNumber: 2,
+			BatchDigest: "two",
+			View: 0,
+		},
+	}
+
+	// All three replicas report the same checkpoint (0) and P/Q sets
+	for i := 0; i < 3; i++ {
+		vset[i] = &ViewChange{
+			H: 0,
+			Cset: cset,
+			Qset: qset,
+			Pset: qset,
+		}
+	}
+
+	xset := instance.assignSequenceNumbers(vset, 0)
+
+	// Move the watermarks past every sequence number in the xset
+	instance.lastExec = 10
+	instance.moveWatermarks(instance.lastExec)
+
+	instance.newViewStore[1] = &NewView{
+		View: 1,
+		Vset: vset,
+		Xset: xset,
+		ReplicaId: 1,
+	}
+
+	if _, ok := instance.processNewView().(viewChangedEvent); !ok {
+		t.Fatalf("Failed to successfully process new view")
+	}
+
+	for idx, val := range instance.certStore {
+		if idx.n < instance.h {
+			t.Errorf("Found %+v=%+v in certStore who's seqNo < %d", idx, val, instance.h)
+		}
+	}
+}
+
+// TestViewChangeDuringExecution processes a new view whose checkpoint (100) is far
+// ahead while an execution is still in flight, and verifies state transfer is
+// deferred until that execution completes.
+func TestViewChangeDuringExecution(t *testing.T) {
+	skipped := false
+	instance := newPbftCore(3, loadConfig(), &omniProto{
+		viewChangeImpl: func(v uint64) {},
+		skipToImpl: func(s uint64, id []byte, replicas []uint64) {
+			skipped = true
+		},
+		invalidateStateImpl: func() {},
+		broadcastImpl: func(b []byte) {},
+		signImpl: func(b []byte) ([]byte, error) { return b, nil },
+		verifyImpl: func(senderID uint64, signature []byte, message []byte) error { return nil },
+	}, &inertTimerFactory{})
+	instance.activeView = false
+	instance.view = 1
+	instance.lastExec = 1
+	// An execution of seqNo 2 is in progress while the new view arrives
+	nextExec := uint64(2)
+	instance.currentExec = &nextExec
+
+	vset := make([]*ViewChange, 3)
+
+	cset := []*ViewChange_C{
+		{
+			SequenceNumber: 100,
+			Id: base64.StdEncoding.EncodeToString([]byte("onehundred")),
+		},
+	}
+
+	// Replica 0 sent checkpoints for 100
+	vset[0] = &ViewChange{
+		H: 90,
+		Cset: cset,
+	}
+
+	// Replica 1 sent checkpoints for 100
+	vset[1] = &ViewChange{
+		H: 90,
+		Cset: cset,
+	}
+
+	// Replica 2 sent checkpoints for 100
+	vset[2] = &ViewChange{
+		H: 90,
+		Cset: cset,
+	}
+
+	xset := make(map[uint64]string)
+	xset[101] = ""
+
+	instance.newViewStore[1] = &NewView{
+		View: 1,
+		Vset: vset,
+		Xset: xset,
+		ReplicaId: 1,
+	}
+
+	if _, ok := instance.processNewView().(viewChangedEvent); !ok {
+		t.Fatalf("Failed to successfully process new view")
+	}
+
+	if skipped {
+		t.Fatalf("Expected state transfer not to be kicked off until execution completes")
+	}
+
+	events.SendEvent(instance, execDoneEvent{})
+
+	if !skipped {
+		t.Fatalf("Expected state transfer to be kicked off once execution completed")
+	}
+}
+
+// TestStateTransferredToOldPoint simulates a state transfer that lands at seqNo 10,
+// below both the watermarks (90) and the high state target (100); the replica must
+// immediately retry the skip toward the higher target.
+func TestStateTransferredToOldPoint(t *testing.T) {
+	skipped := false
+	instance := newPbftCore(3, loadConfig(), &omniProto{
+		skipToImpl: func(s uint64, id []byte, replicas []uint64) {
+			skipped = true
+		},
+		invalidateStateImpl: func() {},
+	}, &inertTimerFactory{})
+	instance.moveWatermarks(90)
+	instance.updateHighStateTarget(&stateUpdateTarget{
+		checkpointMessage: checkpointMessage{
+			seqNo: 100,
+			id: []byte("onehundred"),
+		},
+	})
+
+	// State transfer completes, but at a point well before the target
+	events.SendEvent(instance, stateUpdatedEvent{
+		chkpt: &checkpointMessage{
+			seqNo: 10,
+		},
+	})
+
+	if !skipped {
+		t.Fatalf("Expected state transfer to be kicked off once execution completed")
+	}
+}
+
+// TestStateNetworkMovesOnDuringSlowStateTransfer checks that while a state transfer
+// is in progress, a weak certificate of matching checkpoints (f+1 = 2, here 3) still
+// moves the replica's watermarks forward.
+func TestStateNetworkMovesOnDuringSlowStateTransfer(t *testing.T) {
+	instance := newPbftCore(3, loadConfig(), &omniProto{
+		skipToImpl: func(s uint64, id []byte, replicas []uint64) {},
+		invalidateStateImpl: func() {},
+	}, &inertTimerFactory{})
+	instance.skipInProgress = true
+
+	seqNo := uint64(20)
+
+	// Three replicas agree on the same checkpoint at seqNo 20
+	for i := uint64(0); i < 3; i++ {
+		events.SendEvent(instance, &Checkpoint{
+			SequenceNumber: seqNo,
+			ReplicaId: i,
+			Id: base64.StdEncoding.EncodeToString([]byte("twenty")),
+		})
+	}
+
+	if instance.h != seqNo {
+		t.Fatalf("Expected watermark movement to %d because of state transfer, but low watermark is %d", seqNo, instance.h)
+	}
+}
+
+// This test is designed to ensure state transfer occurs if our checkpoint does not match a quorum cert
+func TestCheckpointDiffersFromQuorum(t *testing.T) {
+	invalidated := false
+	skipped := false
+	instance := newPbftCore(3, loadConfig(), &omniProto{
+		invalidateStateImpl: func() { invalidated = true },
+		skipToImpl: func(s uint64, id []byte, replicas []uint64) { skipped = true },
+	}, &inertTimerFactory{})
+
+	seqNo := uint64(10)
+
+	// Our own checkpoint for seqNo 10 has a digest that will disagree with the quorum
+	badChkpt := &Checkpoint{
+		SequenceNumber: 10,
+		Id: base64.StdEncoding.EncodeToString([]byte("WRONG")),
+		ReplicaId: 0,
+	}
+	instance.chkpts[seqNo] = badChkpt.Id // This is done via the exec path, shortcut it here
+	events.SendEvent(instance, badChkpt)
+
+	// The other three replicas agree on a different (correct) digest
+	for i := uint64(1); i <= 3; i++ {
+		events.SendEvent(instance, &Checkpoint{
+			SequenceNumber: 10,
+			Id: base64.StdEncoding.EncodeToString([]byte("CORRECT")),
+			ReplicaId: i,
+		})
+	}
+
+	if instance.h != 10 {
+		t.Fatalf("Replica should have moved its watermarks but did not")
+	}
+
+	if !instance.skipInProgress {
+		t.Fatalf("Replica should be attempting state transfer")
+	}
+
+	if !invalidated || !skipped {
+		t.Fatalf("Replica should have invalidated its state and skipped")
+	}
+}
diff --git a/consensus/pbft/pbft-persist.go b/consensus/pbft/pbft-persist.go
new file mode 100644
index 00000000000..adb21ab3348
--- /dev/null
+++ b/consensus/pbft/pbft-persist.go
@@ -0,0 +1,176 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "encoding/base64"
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// persistQSet stores the current Q set (pre-prepared entries) under "qset".
+func (instance *pbftCore) persistQSet() {
+ var qset []*ViewChange_PQ
+
+ for _, q := range instance.calcQSet() {
+ qset = append(qset, q)
+ }
+
+ instance.persistPQSet("qset", qset)
+}
+
+// persistPSet stores the current P set (prepared entries) under "pset".
+func (instance *pbftCore) persistPSet() {
+ var pset []*ViewChange_PQ
+
+ for _, p := range instance.calcPSet() {
+ pset = append(pset, p)
+ }
+
+ instance.persistPQSet("pset", pset)
+}
+
+// persistPQSet marshals a P or Q set and stores it under key; a marshal
+// failure is logged and the set is simply not persisted.
+func (instance *pbftCore) persistPQSet(key string, set []*ViewChange_PQ) {
+ raw, err := proto.Marshal(&PQset{set})
+ if err != nil {
+ logger.Warningf("Replica %d could not persist pqset: %s", instance.id, err)
+ return
+ }
+ instance.consumer.StoreState(key, raw)
+}
+
+// restorePQSet reads and unmarshals a previously persisted P or Q set.
+// Returns nil when the key is absent or the stored bytes are corrupt.
+func (instance *pbftCore) restorePQSet(key string) []*ViewChange_PQ {
+ raw, err := instance.consumer.ReadState(key)
+ if err != nil {
+ logger.Debugf("Replica %d could not restore state %s: %s", instance.id, key, err)
+ return nil
+ }
+ val := &PQset{}
+ err = proto.Unmarshal(raw, val)
+ if err != nil {
+ logger.Errorf("Replica %d could not unmarshal %s - local state is damaged: %s", instance.id, key, err)
+ return nil
+ }
+ return val.GetSet()
+}
+
+// persistRequestBatch stores the batch from reqBatchStore keyed by its digest.
+func (instance *pbftCore) persistRequestBatch(digest string) {
+ reqBatch := instance.reqBatchStore[digest]
+ reqBatchPacked, err := proto.Marshal(reqBatch)
+ if err != nil {
+ logger.Warningf("Replica %d could not persist request batch %s: %s", instance.id, digest, err)
+ return
+ }
+ instance.consumer.StoreState("reqBatch."+digest, reqBatchPacked)
+}
+
+// persistDelRequestBatch removes the persisted batch for the given digest.
+func (instance *pbftCore) persistDelRequestBatch(digest string) {
+ instance.consumer.DelState("reqBatch." + digest)
+}
+
+// persistDelAllRequestBatches removes every persisted request batch.
+func (instance *pbftCore) persistDelAllRequestBatches() {
+ reqBatches, err := instance.consumer.ReadStateSet("reqBatch.")
+ if err == nil {
+ for k := range reqBatches {
+ instance.consumer.DelState(k)
+ }
+ }
+}
+
+// persistCheckpoint stores the checkpoint id for seqNo under "chkpt.<seqNo>".
+func (instance *pbftCore) persistCheckpoint(seqNo uint64, id []byte) {
+ key := fmt.Sprintf("chkpt.%d", seqNo)
+ instance.consumer.StoreState(key, id)
+}
+
+// persistDelCheckpoint removes the persisted checkpoint for seqNo.
+func (instance *pbftCore) persistDelCheckpoint(seqNo uint64) {
+ key := fmt.Sprintf("chkpt.%d", seqNo)
+ instance.consumer.DelState(key)
+}
+
+// restoreState rebuilds the replica's in-memory PBFT state (pset, qset,
+// request batches, checkpoints, view/seqNo, lastExec) from the persisted
+// key-value store after a restart. Failures to restore individual pieces
+// are logged and tolerated.
+func (instance *pbftCore) restoreState() {
+ // Advance view/seqNo to the highest values seen in a restored set.
+ updateSeqView := func(set []*ViewChange_PQ) {
+ for _, e := range set {
+ if instance.view < e.View {
+ instance.view = e.View
+ }
+ if instance.seqNo < e.SequenceNumber {
+ instance.seqNo = e.SequenceNumber
+ }
+ }
+ }
+
+ set := instance.restorePQSet("pset")
+ for _, e := range set {
+ instance.pset[e.SequenceNumber] = e
+ }
+ updateSeqView(set)
+
+ set = instance.restorePQSet("qset")
+ for _, e := range set {
+ instance.qset[qidx{e.BatchDigest, e.SequenceNumber}] = e
+ }
+ updateSeqView(set)
+
+ // Re-key restored batches by recomputed hash rather than trusting the
+ // persisted key suffix.
+ reqBatchesPacked, err := instance.consumer.ReadStateSet("reqBatch.")
+ if err == nil {
+ for k, v := range reqBatchesPacked {
+ reqBatch := &RequestBatch{}
+ err = proto.Unmarshal(v, reqBatch)
+ if err != nil {
+ logger.Warningf("Replica %d could not restore request batch %s", instance.id, k)
+ } else {
+ instance.reqBatchStore[hash(reqBatch)] = reqBatch
+ }
+ }
+ } else {
+ logger.Warningf("Replica %d could not restore reqBatchStore: %s", instance.id, err)
+ }
+
+ chkpts, err := instance.consumer.ReadStateSet("chkpt.")
+ if err == nil {
+ highSeq := uint64(0)
+ for key, id := range chkpts {
+ var seqNo uint64
+ if _, err = fmt.Sscanf(key, "chkpt.%d", &seqNo); err != nil {
+ logger.Warningf("Replica %d could not restore checkpoint key %s", instance.id, key)
+ } else {
+ idAsString := base64.StdEncoding.EncodeToString(id)
+ logger.Debugf("Replica %d found checkpoint %s for seqNo %d", instance.id, idAsString, seqNo)
+ instance.chkpts[seqNo] = idAsString
+ if seqNo > highSeq {
+ highSeq = seqNo
+ }
+ }
+ }
+ // Slide the watermark window up to the highest restored checkpoint.
+ instance.moveWatermarks(highSeq)
+ } else {
+ logger.Warningf("Replica %d could not restore checkpoints: %s", instance.id, err)
+ }
+
+ instance.restoreLastSeqNo()
+
+ logger.Infof("Replica %d restored state: view: %d, seqNo: %d, pset: %d, qset: %d, reqBatches: %d, chkpts: %d",
+ instance.id, instance.view, instance.seqNo, len(instance.pset), len(instance.qset), len(instance.reqBatchStore), len(instance.chkpts))
+}
+
+// restoreLastSeqNo recovers lastExec from the consumer, defaulting to 0 on
+// failure.
+func (instance *pbftCore) restoreLastSeqNo() {
+ var err error
+ if instance.lastExec, err = instance.consumer.getLastSeqNo(); err != nil {
+ logger.Warningf("Replica %d could not restore lastExec: %s", instance.id, err)
+ instance.lastExec = 0
+ }
+ logger.Infof("Replica %d restored lastExec: %d", instance.id, instance.lastExec)
+}
diff --git a/consensus/pbft/pbft.go b/consensus/pbft/pbft.go
new file mode 100644
index 00000000000..d0d30432b34
--- /dev/null
+++ b/consensus/pbft/pbft.go
@@ -0,0 +1,159 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/hyperledger/fabric/consensus"
+ pb "github.com/hyperledger/fabric/protos"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/spf13/viper"
+)
+
+const configPrefix = "CORE_PBFT"
+
+var pluginInstance consensus.Consenter // singleton service
+var config *viper.Viper
+
+// Load the plugin configuration once at package initialization.
+func init() {
+ config = loadConfig()
+}
+
+// GetPlugin returns the handle to the Consenter singleton
+func GetPlugin(c consensus.Stack) consensus.Consenter {
+ if pluginInstance == nil {
+ pluginInstance = New(c)
+ }
+ return pluginInstance
+}
+
+// New creates a new Obc* instance that provides the Consenter interface.
+// Internally, it uses an opaque pbft-core instance.
+func New(stack consensus.Stack) consensus.Consenter {
+ handle, _, _ := stack.GetNetworkHandles()
+ id, _ := getValidatorID(handle)
+
+ // Only "batch" mode is supported; any other configured mode is fatal.
+ switch strings.ToLower(config.GetString("general.mode")) {
+ case "batch":
+ return newObcBatch(id, config, stack)
+ default:
+ panic(fmt.Errorf("Invalid PBFT mode: %s", config.GetString("general.mode")))
+ }
+}
+
+// loadConfig builds the plugin's viper configuration: environment variables
+// prefixed with CORE_PBFT override values from a "config" file searched for
+// in the working directory, relative consensus/pbft paths, and each GOPATH
+// entry. Panics if no config file can be read.
+func loadConfig() (config *viper.Viper) {
+ config = viper.New()
+
+ // for environment variables
+ config.SetEnvPrefix(configPrefix)
+ config.AutomaticEnv()
+ replacer := strings.NewReplacer(".", "_")
+ config.SetEnvKeyReplacer(replacer)
+
+ config.SetConfigName("config")
+ config.AddConfigPath("./")
+ config.AddConfigPath("../consensus/pbft/")
+ config.AddConfigPath("../../consensus/pbft")
+ // Path to look for the config file in based on GOPATH
+ gopath := os.Getenv("GOPATH")
+ for _, p := range filepath.SplitList(gopath) {
+ pbftpath := filepath.Join(p, "src/github.com/hyperledger/fabric/consensus/pbft")
+ config.AddConfigPath(pbftpath)
+ }
+
+ err := config.ReadInConfig()
+ if err != nil {
+ panic(fmt.Errorf("Error reading %s plugin config: %s", configPrefix, err))
+ }
+ return
+}
+
+// getValidatorID returns the uint64 ID corresponding to a peer handle.
+// For the MVP the handle name must have the form "vpX" where X is the
+// replica's integer ID; any other name yields an explanatory error.
+func getValidatorID(handle *pb.PeerID) (id uint64, err error) {
+ // as requested here: https://github.com/hyperledger/fabric/issues/462#issuecomment-170785410
+ if startsWith := strings.HasPrefix(handle.Name, "vp"); startsWith {
+ id, err = strconv.ParseUint(handle.Name[2:], 10, 64)
+ if err != nil {
+ return id, fmt.Errorf("Error extracting ID from \"%s\" handle: %v", handle.Name, err)
+ }
+ return
+ }
+
+ // Fix: the original message left the parenthesis unbalanced.
+ err = fmt.Errorf(`For MVP, set the VP's peer.id to vpX,
+ where X is a unique integer between 0 and N-1
+ (N being the maximum number of VPs in the network)`)
+ return
+}
+
+// Returns the peer handle that corresponds to a validator ID (uint64 assigned to it for PBFT)
+// The returned error is always nil; the signature keeps symmetry with getValidatorID.
+func getValidatorHandle(id uint64) (handle *pb.PeerID, err error) {
+ // as requested here: https://github.com/hyperledger/fabric/issues/462#issuecomment-170785410
+ name := "vp" + strconv.FormatUint(id, 10)
+ return &pb.PeerID{Name: name}, nil
+}
+
+// Returns the peer handles corresponding to a list of replica ids
+func getValidatorHandles(ids []uint64) (handles []*pb.PeerID) {
+ handles = make([]*pb.PeerID, len(ids))
+ for i, id := range ids {
+ handles[i], _ = getValidatorHandle(id)
+ }
+ return
+}
+
+// obcGeneric couples a pbftCore instance with the consensus stack it drives,
+// adapting stack calls to the interfaces pbftCore expects.
+type obcGeneric struct {
+ stack consensus.Stack
+ pbft *pbftCore
+}
+
+// skipTo initiates a state transfer to the blockchain state identified by the
+// marshaled BlockchainInfo blob id, targeting the given replicas.
+func (op *obcGeneric) skipTo(seqNo uint64, id []byte, replicas []uint64) {
+ info := &pb.BlockchainInfo{}
+ err := proto.Unmarshal(id, info)
+ if err != nil {
+ logger.Error(fmt.Sprintf("Error unmarshaling: %s", err))
+ return
+ }
+ op.stack.UpdateState(&checkpointMessage{seqNo, id}, info, getValidatorHandles(replicas))
+}
+
+// invalidateState marks the stack's state as possibly out of date.
+func (op *obcGeneric) invalidateState() {
+ op.stack.InvalidateState()
+}
+
+// validateState marks the stack's state as up to date again.
+func (op *obcGeneric) validateState() {
+ op.stack.ValidateState()
+}
+
+// getState returns the stack's current blockchain info as an opaque blob.
+func (op *obcGeneric) getState() []byte {
+ return op.stack.GetBlockchainInfoBlob()
+}
+
+// getLastSeqNo recovers the last executed sequence number from the metadata
+// stored with the block head. Fix: the original ignored the Unmarshal error,
+// so corrupt metadata would be silently reported as seqNo 0 with a nil error.
+func (op *obcGeneric) getLastSeqNo() (uint64, error) {
+ raw, err := op.stack.GetBlockHeadMetadata()
+ if err != nil {
+ return 0, err
+ }
+ meta := &Metadata{}
+ if err := proto.Unmarshal(raw, meta); err != nil {
+ return 0, err
+ }
+ return meta.SeqNo, nil
+}
diff --git a/consensus/pbft/persist-forward.go b/consensus/pbft/persist-forward.go
new file mode 100644
index 00000000000..cddc6a56b63
--- /dev/null
+++ b/consensus/pbft/persist-forward.go
@@ -0,0 +1,41 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "github.com/hyperledger/fabric/consensus"
+)
+
+// persistForward adapts a consensus.StatePersistor into the persistence
+// interface pbftCore consumes, by plain delegation.
+type persistForward struct {
+ persistor consensus.StatePersistor
+}
+
+// ReadState delegates to the underlying persistor.
+func (p persistForward) ReadState(key string) ([]byte, error) {
+ return p.persistor.ReadState(key)
+}
+
+// ReadStateSet delegates to the underlying persistor.
+func (p persistForward) ReadStateSet(prefix string) (map[string][]byte, error) {
+ return p.persistor.ReadStateSet(prefix)
+}
+
+// StoreState delegates to the underlying persistor.
+func (p persistForward) StoreState(key string, val []byte) error {
+ return p.persistor.StoreState(key, val)
+}
+
+// DelState delegates to the underlying persistor.
+func (p persistForward) DelState(key string) {
+ p.persistor.DelState(key)
+}
diff --git a/consensus/pbft/requeststore.go b/consensus/pbft/requeststore.go
new file mode 100644
index 00000000000..370c18fe18f
--- /dev/null
+++ b/consensus/pbft/requeststore.go
@@ -0,0 +1,147 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import "container/list"
+
+// requestContainer pairs a request with its digest key for list storage.
+type requestContainer struct {
+ key string
+ req *Request
+}
+
+// orderedRequests is an insertion-ordered set of requests: a linked list
+// preserves arrival order while the presence map gives O(1) membership
+// checks and removals keyed by request digest.
+type orderedRequests struct {
+ order list.List
+ presence map[string]*list.Element
+}
+
+// Len returns the number of stored requests.
+func (a *orderedRequests) Len() int {
+ return a.order.Len()
+}
+
+// wrapRequest computes the digest key for req and bundles both together.
+func (a *orderedRequests) wrapRequest(req *Request) requestContainer {
+ return requestContainer{
+ key: hash(req),
+ req: req,
+ }
+}
+
+// has reports whether a request with the given digest key is stored.
+func (a *orderedRequests) has(key string) bool {
+ _, ok := a.presence[key]
+ return ok
+}
+
+// add appends request to the list unless an equal request is already present.
+func (a *orderedRequests) add(request *Request) {
+ rc := a.wrapRequest(request)
+ if !a.has(rc.key) {
+ e := a.order.PushBack(rc)
+ a.presence[rc.key] = e
+ }
+}
+
+// adds adds each request in order, skipping duplicates.
+func (a *orderedRequests) adds(requests []*Request) {
+ for _, req := range requests {
+ a.add(req)
+ }
+}
+
+// remove deletes request if present, returning whether it was found.
+func (a *orderedRequests) remove(request *Request) bool {
+ rc := a.wrapRequest(request)
+ e, ok := a.presence[rc.key]
+ if !ok {
+ return false
+ }
+ a.order.Remove(e)
+ delete(a.presence, rc.key)
+ return true
+}
+
+// removes deletes every request, returning true only if all were present.
+func (a *orderedRequests) removes(requests []*Request) bool {
+ allSuccess := true
+ for _, req := range requests {
+ if !a.remove(req) {
+ allSuccess = false
+ }
+ }
+
+ return allSuccess
+}
+
+// empty resets the structure to an initialized, zero-length state.
+func (a *orderedRequests) empty() {
+ a.order.Init()
+ a.presence = make(map[string]*list.Element)
+}
+
+// requestStore tracks requests in two ordered sets: outstanding (received,
+// not yet executed) and pending (currently being ordered). Pending is
+// expected to be a subset of outstanding.
+type requestStore struct {
+ outstandingRequests *orderedRequests
+ pendingRequests *orderedRequests
+}
+
+// newRequestStore creates a new requestStore.
+func newRequestStore() *requestStore {
+ rs := &requestStore{
+ outstandingRequests: &orderedRequests{},
+ pendingRequests: &orderedRequests{},
+ }
+ // initialize data structures
+ rs.outstandingRequests.empty()
+ rs.pendingRequests.empty()
+
+ return rs
+}
+
+// storeOutstanding adds a request to the outstanding request list
+func (rs *requestStore) storeOutstanding(request *Request) {
+ rs.outstandingRequests.add(request)
+}
+
+// storePending adds a request to the pending request list
+func (rs *requestStore) storePending(request *Request) {
+ rs.pendingRequests.add(request)
+}
+
+// storePendings adds a slice of requests to the pending request list
+func (rs *requestStore) storePendings(requests []*Request) {
+ rs.pendingRequests.adds(requests)
+}
+
+// remove deletes the request from both the outstanding and pending lists, it returns whether it was found in each list respectively
+func (rs *requestStore) remove(request *Request) (outstanding, pending bool) {
+ outstanding = rs.outstandingRequests.remove(request)
+ pending = rs.pendingRequests.remove(request)
+ return
+}
+
+// hasNonPending reports whether any outstanding request is not yet pending
+// (relies on pending being a subset of outstanding, so comparing lengths suffices)
+func (rs *requestStore) hasNonPending() bool {
+ return rs.outstandingRequests.Len() > rs.pendingRequests.Len()
+}
+
+// getNextNonPending returns up to the next n outstanding, but not pending requests
+func (rs *requestStore) getNextNonPending(n int) (result []*Request) {
+ for oreqc := rs.outstandingRequests.order.Front(); oreqc != nil; oreqc = oreqc.Next() {
+ oreq := oreqc.Value.(requestContainer)
+ if rs.pendingRequests.has(oreq.key) {
+ continue
+ }
+ result = append(result, oreq.req)
+ if len(result) == n {
+ break
+ }
+ }
+
+ return result
+}
diff --git a/consensus/pbft/requeststore_test.go b/consensus/pbft/requeststore_test.go
new file mode 100644
index 00000000000..959a959192b
--- /dev/null
+++ b/consensus/pbft/requeststore_test.go
@@ -0,0 +1,83 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import "testing"
+
+// TestOrderedRequests exercises membership, duplicate-safe removal, and
+// insertion-order preservation of the orderedRequests structure.
+func TestOrderedRequests(t *testing.T) {
+ or := &orderedRequests{}
+ or.empty()
+
+ r1 := createPbftReq(2, 1)
+ r2 := createPbftReq(2, 2)
+ r3 := createPbftReq(19, 1)
+ if or.has(or.wrapRequest(r1).key) {
+ t.Errorf("should not have req")
+ }
+ or.add(r1)
+ if !or.has(or.wrapRequest(r1).key) {
+ t.Errorf("should have req")
+ }
+ if or.has(or.wrapRequest(r2).key) {
+ t.Errorf("should not have req")
+ }
+ if or.remove(r2) {
+ t.Errorf("should not have removed req")
+ }
+ if !or.remove(r1) {
+ t.Errorf("should have removed req")
+ }
+ // Removing the same request twice must report failure the second time.
+ if or.remove(r1) {
+ t.Errorf("should not have removed req")
+ }
+ if or.order.Len() != 0 || len(or.presence) != 0 {
+ t.Errorf("should have 0 len")
+ }
+ or.adds([]*Request{r1, r2, r3})
+
+ // adds must preserve slice order; r3 was added last.
+ if or.order.Back().Value.(requestContainer).req != r3 {
+ t.Errorf("incorrect order")
+ }
+}
+
+// BenchmarkOrderedRequests measures add/has/remove throughput over a
+// fixed population of 1000 requests.
+func BenchmarkOrderedRequests(b *testing.B) {
+ or := &orderedRequests{}
+ or.empty()
+
+ Nreq := 1000
+
+ // Pre-hash the requests outside the timed loop.
+ reqs := make(map[string]*Request)
+ for i := 0; i < Nreq; i++ {
+ rc := or.wrapRequest(createPbftReq(int64(i), 0))
+ reqs[rc.key] = rc.req
+ }
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ for _, r := range reqs {
+ or.add(r)
+ }
+
+ for k := range reqs {
+ _ = or.has(k)
+ }
+
+ for _, r := range reqs {
+ or.remove(r)
+ }
+ }
+}
diff --git a/consensus/pbft/sign.go b/consensus/pbft/sign.go
new file mode 100644
index 00000000000..68f19a42a79
--- /dev/null
+++ b/consensus/pbft/sign.go
@@ -0,0 +1,73 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import pb "github.com/golang/protobuf/proto"
+
+// signable abstracts a message that carries a detached signature and the
+// replica ID of its signer, so sign/verify can treat all such messages
+// uniformly.
+type signable interface {
+ getSignature() []byte
+ setSignature(s []byte)
+ getID() uint64
+ setID(id uint64)
+ serialize() ([]byte, error)
+}
+
+// sign serializes s with its signature field cleared, signs the bytes via
+// the consumer, and stores the resulting signature back on s.
+func (instance *pbftCore) sign(s signable) error {
+ s.setSignature(nil)
+ raw, err := s.serialize()
+ if err != nil {
+ return err
+ }
+ signedRaw, err := instance.consumer.sign(raw)
+ if err != nil {
+ return err
+ }
+ s.setSignature(signedRaw)
+
+ return nil
+}
+
+// verify checks s's signature against the serialization of s with the
+// signature field cleared; the original signature is restored afterwards.
+func (instance *pbftCore) verify(s signable) error {
+ origSig := s.getSignature()
+ s.setSignature(nil)
+ raw, err := s.serialize()
+ s.setSignature(origSig)
+ if err != nil {
+ return err
+ }
+ return instance.consumer.verify(s.getID(), origSig, raw)
+}
+
+// ViewChange implements signable.
+func (vc *ViewChange) getSignature() []byte {
+ return vc.Signature
+}
+
+func (vc *ViewChange) setSignature(sig []byte) {
+ vc.Signature = sig
+}
+
+func (vc *ViewChange) getID() uint64 {
+ return vc.ReplicaId
+}
+
+func (vc *ViewChange) setID(id uint64) {
+ vc.ReplicaId = id
+}
+
+func (vc *ViewChange) serialize() ([]byte, error) {
+ return pb.Marshal(vc)
+}
diff --git a/consensus/pbft/util.go b/consensus/pbft/util.go
new file mode 100644
index 00000000000..d6811b5b083
--- /dev/null
+++ b/consensus/pbft/util.go
@@ -0,0 +1,42 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+package pbft
+
+import (
+ "encoding/base64"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/util"
+)
+
+// hash returns the base64-encoded crypto hash of the protobuf serialization
+// of a *Request or *RequestBatch; any other type is logged and yields "".
+func hash(msg interface{}) string {
+ var raw []byte
+ switch converted := msg.(type) {
+ case *Request:
+ raw, _ = proto.Marshal(converted)
+ case *RequestBatch:
+ raw, _ = proto.Marshal(converted)
+ default:
+ logger.Error("Asked to hash non-supported message type, ignoring")
+ return ""
+ }
+ return base64.StdEncoding.EncodeToString(util.ComputeCryptoHash(raw))
+
+}
diff --git a/consensus/pbft/viewchange.go b/consensus/pbft/viewchange.go
new file mode 100644
index 00000000000..64ebe9259f6
--- /dev/null
+++ b/consensus/pbft/viewchange.go
@@ -0,0 +1,671 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pbft
+
+import (
+ "encoding/base64"
+ "fmt"
+ "reflect"
+
+ "github.com/hyperledger/fabric/consensus/util/events"
+)
+
+// viewChangeQuorumEvent is returned to the event loop when a new ViewChange message is received which is part of a quorum cert
+type viewChangeQuorumEvent struct{}
+
+// correctViewChange validates a ViewChange message: every P/Q entry must be
+// from an earlier view and fall inside the sender's watermark window
+// (vc.H, vc.H+L], and every checkpoint entry must fall in [vc.H, vc.H+L].
+func (instance *pbftCore) correctViewChange(vc *ViewChange) bool {
+ for _, p := range append(vc.Pset, vc.Qset...) {
+ if !(p.View < vc.View && p.SequenceNumber > vc.H && p.SequenceNumber <= vc.H+instance.L) {
+ logger.Debugf("Replica %d invalid p entry in view-change: vc(v:%d h:%d) p(v:%d n:%d)",
+ instance.id, vc.View, vc.H, p.View, p.SequenceNumber)
+ return false
+ }
+ }
+
+ for _, c := range vc.Cset {
+ // PBFT: the paper says c.n > vc.h
+ if !(c.SequenceNumber >= vc.H && c.SequenceNumber <= vc.H+instance.L) {
+ logger.Debugf("Replica %d invalid c entry in view-change: vc(v:%d h:%d) c(n:%d)",
+ instance.id, vc.View, vc.H, c.SequenceNumber)
+ return false
+ }
+ }
+
+ return true
+}
+
+// calcPSet computes the P set for a view change: for each sequence number,
+// the highest-view request that has prepared locally, merged with any
+// previously computed/restored pset entries.
+func (instance *pbftCore) calcPSet() map[uint64]*ViewChange_PQ {
+ pset := make(map[uint64]*ViewChange_PQ)
+
+ for n, p := range instance.pset {
+ pset[n] = p
+ }
+
+ // P set: requests that have prepared here
+ //
+ // " has a prepared certificate, and no request
+ // prepared in a later view with the same number"
+
+ for idx, cert := range instance.certStore {
+ if cert.prePrepare == nil {
+ continue
+ }
+
+ digest := cert.digest
+ if !instance.prepared(digest, idx.v, idx.n) {
+ continue
+ }
+
+ // Keep only the highest view for each sequence number.
+ if p, ok := pset[idx.n]; ok && p.View > idx.v {
+ continue
+ }
+
+ pset[idx.n] = &ViewChange_PQ{
+ SequenceNumber: idx.n,
+ BatchDigest: digest,
+ View: idx.v,
+ }
+ }
+
+ return pset
+}
+
+// calcQSet computes the Q set for a view change: for each (digest, seqNo)
+// pair, the highest-view request that has pre-prepared locally, merged with
+// any previously computed/restored qset entries.
+func (instance *pbftCore) calcQSet() map[qidx]*ViewChange_PQ {
+ qset := make(map[qidx]*ViewChange_PQ)
+
+ for n, q := range instance.qset {
+ qset[n] = q
+ }
+
+ // Q set: requests that have pre-prepared here (pre-prepare or
+ // prepare sent)
+ //
+ // ": requests that pre-prepared here, and did not
+ // pre-prepare in a later view with the same number"
+
+ for idx, cert := range instance.certStore {
+ if cert.prePrepare == nil {
+ continue
+ }
+
+ digest := cert.digest
+ if !instance.prePrepared(digest, idx.v, idx.n) {
+ continue
+ }
+
+ // Keep only the highest view for each (digest, seqNo) pair.
+ qi := qidx{digest, idx.n}
+ if q, ok := qset[qi]; ok && q.View > idx.v {
+ continue
+ }
+
+ qset[qi] = &ViewChange_PQ{
+ SequenceNumber: idx.n,
+ BatchDigest: digest,
+ View: idx.v,
+ }
+ }
+
+ return qset
+}
+
+// sendViewChange advances to the next view, assembles and signs a
+// ViewChange message carrying this replica's C/P/Q sets, broadcasts it,
+// and feeds it through the local receive path so it is counted as well.
+func (instance *pbftCore) sendViewChange() events.Event {
+ instance.stopTimer()
+
+ delete(instance.newViewStore, instance.view)
+ instance.view++
+ instance.activeView = false
+
+ instance.pset = instance.calcPSet()
+ instance.qset = instance.calcQSet()
+
+ // clear old messages
+ for idx := range instance.certStore {
+ if idx.v < instance.view {
+ delete(instance.certStore, idx)
+ }
+ }
+ for idx := range instance.viewChangeStore {
+ if idx.v < instance.view {
+ delete(instance.viewChangeStore, idx)
+ }
+ }
+
+ vc := &ViewChange{
+ View: instance.view,
+ H: instance.h,
+ ReplicaId: instance.id,
+ }
+
+ // C set: this replica's stable checkpoints.
+ for n, id := range instance.chkpts {
+ vc.Cset = append(vc.Cset, &ViewChange_C{
+ SequenceNumber: n,
+ Id: id,
+ })
+ }
+
+ for _, p := range instance.pset {
+ if p.SequenceNumber < instance.h {
+ logger.Errorf("BUG! Replica %d should not have anything in our pset less than h, found %+v", instance.id, p)
+ }
+ vc.Pset = append(vc.Pset, p)
+ }
+
+ for _, q := range instance.qset {
+ if q.SequenceNumber < instance.h {
+ logger.Errorf("BUG! Replica %d should not have anything in our qset less than h, found %+v", instance.id, q)
+ }
+ vc.Qset = append(vc.Qset, q)
+ }
+
+ instance.sign(vc)
+
+ logger.Infof("Replica %d sending view-change, v:%d, h:%d, |C|:%d, |P|:%d, |Q|:%d",
+ instance.id, vc.View, vc.H, len(vc.Cset), len(vc.Pset), len(vc.Qset))
+
+ instance.innerBroadcast(&Message{Payload: &Message_ViewChange{ViewChange: vc}})
+
+ // Re-send the view change if no quorum forms before the resend timeout.
+ instance.vcResendTimer.Reset(instance.vcResendTimeout, viewChangeResendTimerEvent{})
+
+ return instance.recvViewChange(vc)
+}
+
+// recvViewChange validates and records an incoming ViewChange. It may
+// trigger a cascading view change when f+1 replicas ask for views greater
+// than ours, or return viewChangeQuorumEvent when a quorum for our view
+// forms. Fix: the original nested a second, identical
+// quorum >= allCorrectReplicasQuorum() check inside the outer condition;
+// the redundant branch is flattened (behavior unchanged).
+func (instance *pbftCore) recvViewChange(vc *ViewChange) events.Event {
+ logger.Infof("Replica %d received view-change from replica %d, v:%d, h:%d, |C|:%d, |P|:%d, |Q|:%d",
+ instance.id, vc.ReplicaId, vc.View, vc.H, len(vc.Cset), len(vc.Pset), len(vc.Qset))
+
+ if err := instance.verify(vc); err != nil {
+ logger.Warningf("Replica %d found incorrect signature in view-change message: %s", instance.id, err)
+ return nil
+ }
+
+ if vc.View < instance.view {
+ logger.Warningf("Replica %d found view-change message for old view", instance.id)
+ return nil
+ }
+
+ if !instance.correctViewChange(vc) {
+ logger.Warningf("Replica %d found view-change message incorrect", instance.id)
+ return nil
+ }
+
+ // Only the first view-change per (view, replica) pair is counted.
+ if _, ok := instance.viewChangeStore[vcidx{vc.View, vc.ReplicaId}]; ok {
+ logger.Warningf("Replica %d already has a view change message for view %d from replica %d", instance.id, vc.View, vc.ReplicaId)
+ return nil
+ }
+
+ instance.viewChangeStore[vcidx{vc.View, vc.ReplicaId}] = vc
+
+ // PBFT TOCS 4.5.1 Liveness: "if a replica receives a set of
+ // f+1 valid VIEW-CHANGE messages from other replicas for
+ // views greater than its current view, it sends a VIEW-CHANGE
+ // message for the smallest view in the set, even if its timer
+ // has not expired"
+ replicas := make(map[uint64]bool)
+ minView := uint64(0)
+ for idx := range instance.viewChangeStore {
+ if idx.v <= instance.view {
+ continue
+ }
+
+ replicas[idx.id] = true
+ if minView == 0 || idx.v < minView {
+ minView = idx.v
+ }
+ }
+
+ // We only enter this if there are enough view change messages _greater_ than our current view
+ if len(replicas) >= instance.f+1 {
+ logger.Infof("Replica %d received f+1 view-change messages, triggering view-change to view %d",
+ instance.id, minView)
+ // subtract one, because sendViewChange() increments
+ instance.view = minView - 1
+ return instance.sendViewChange()
+ }
+
+ quorum := 0
+ for idx := range instance.viewChangeStore {
+ if idx.v == instance.view {
+ quorum++
+ }
+ }
+ logger.Debugf("Replica %d now has %d view change requests for view %d", instance.id, quorum, instance.view)
+
+ if !instance.activeView && vc.View == instance.view && quorum >= instance.allCorrectReplicasQuorum() {
+ instance.vcResendTimer.Stop()
+ instance.startTimer(instance.lastNewViewTimeout, "new view change")
+ // Double the timeout for the next round to tolerate slow views.
+ instance.lastNewViewTimeout = 2 * instance.lastNewViewTimeout
+ return viewChangeQuorumEvent{}
+ }
+
+ return nil
+}
+
+// sendNewView is invoked on the new primary once a view-change quorum has
+// formed: it selects the initial checkpoint, assigns sequence numbers to
+// surviving requests, broadcasts the NewView, and processes it locally.
+func (instance *pbftCore) sendNewView() events.Event {
+
+ if _, ok := instance.newViewStore[instance.view]; ok {
+ logger.Debugf("Replica %d already has new view in store for view %d, skipping", instance.id, instance.view)
+ return nil
+ }
+
+ vset := instance.getViewChanges()
+
+ cp, ok, _ := instance.selectInitialCheckpoint(vset)
+ if !ok {
+ logger.Infof("Replica %d could not find consistent checkpoint: %+v", instance.id, instance.viewChangeStore)
+ return nil
+ }
+
+ msgList := instance.assignSequenceNumbers(vset, cp.SequenceNumber)
+ if msgList == nil {
+ logger.Infof("Replica %d could not assign sequence numbers for new view", instance.id)
+ return nil
+ }
+
+ nv := &NewView{
+ View: instance.view,
+ Vset: vset,
+ Xset: msgList,
+ ReplicaId: instance.id,
+ }
+
+ logger.Infof("Replica %d is new primary, sending new-view, v:%d, X:%+v",
+ instance.id, nv.View, nv.Xset)
+
+ instance.innerBroadcast(&Message{Payload: &Message_NewView{NewView: nv}})
+ instance.newViewStore[instance.view] = nv
+ return instance.processNewView()
+}
+
+// recvNewView validates an incoming NewView (sane view number, sent by the
+// view's primary, not seen before, all view-change signatures valid),
+// stores it, and processes it.
+func (instance *pbftCore) recvNewView(nv *NewView) events.Event {
+ logger.Infof("Replica %d received new-view %d",
+ instance.id, nv.View)
+
+ if !(nv.View > 0 && nv.View >= instance.view && instance.primary(nv.View) == nv.ReplicaId && instance.newViewStore[nv.View] == nil) {
+ logger.Infof("Replica %d rejecting invalid new-view from %d, v:%d",
+ instance.id, nv.ReplicaId, nv.View)
+ return nil
+ }
+
+ for _, vc := range nv.Vset {
+ if err := instance.verify(vc); err != nil {
+ logger.Warningf("Replica %d found incorrect view-change signature in new-view message: %s", instance.id, err)
+ return nil
+ }
+ }
+
+ instance.newViewStore[nv.View] = nv
+ return instance.processNewView()
+}
+
+// processNewView validates the stored NewView for the current view against
+// this replica's own computation (initial checkpoint and Xset), decides
+// between executing forward and state transfer to reach the checkpoint,
+// and fetches any request batches assigned in the Xset that are missing
+// locally. Fix: the "missing quorum of commit certificate" Debugf had four
+// %d verbs but only three arguments — seqNo was omitted, so the log printed
+// %!d(MISSING); the argument list is now complete.
+func (instance *pbftCore) processNewView() events.Event {
+ var newReqBatchMissing bool
+ nv, ok := instance.newViewStore[instance.view]
+ if !ok {
+ logger.Debugf("Replica %d ignoring processNewView as it could not find view %d in its newViewStore", instance.id, instance.view)
+ return nil
+ }
+
+ if instance.activeView {
+ logger.Infof("Replica %d ignoring new-view from %d, v:%d: we are active in view %d",
+ instance.id, nv.ReplicaId, nv.View, instance.view)
+ return nil
+ }
+
+ cp, ok, replicas := instance.selectInitialCheckpoint(nv.Vset)
+ if !ok {
+ logger.Warningf("Replica %d could not determine initial checkpoint: %+v",
+ instance.id, instance.viewChangeStore)
+ return instance.sendViewChange()
+ }
+
+ // Account for an execution that may currently be in flight.
+ speculativeLastExec := instance.lastExec
+ if instance.currentExec != nil {
+ speculativeLastExec = *instance.currentExec
+ }
+
+ // If we have not reached the sequence number, check to see if we can reach it without state transfer
+ // In general, executions are better than state transfer
+ if speculativeLastExec < cp.SequenceNumber {
+ canExecuteToTarget := true
+ outer:
+ for seqNo := speculativeLastExec + 1; seqNo <= cp.SequenceNumber; seqNo++ {
+ found := false
+ for idx, cert := range instance.certStore {
+ if idx.n != seqNo {
+ continue
+ }
+
+ quorum := 0
+ for _, p := range cert.commit {
+ // Was this committed in the previous view
+ if p.View == idx.v && p.SequenceNumber == seqNo {
+ quorum++
+ }
+ }
+
+ if quorum < instance.intersectionQuorum() {
+ logger.Debugf("Replica %d missing quorum of commit certificate for seqNo=%d, only has %d of %d", instance.id, seqNo, quorum, instance.intersectionQuorum())
+ continue
+ }
+
+ found = true
+ break
+ }
+
+ if !found {
+ canExecuteToTarget = false
+ logger.Debugf("Replica %d missing commit certificate for seqNo=%d", instance.id, seqNo)
+ break outer
+ }
+
+ }
+
+ if canExecuteToTarget {
+ logger.Debugf("Replica %d needs to process a new view, but can execute to the checkpoint seqNo %d, delaying processing of new view", instance.id, cp.SequenceNumber)
+ return nil
+ }
+
+ logger.Infof("Replica %d cannot execute to the view change checkpoint with seqNo %d", instance.id, cp.SequenceNumber)
+ }
+
+ msgList := instance.assignSequenceNumbers(nv.Vset, cp.SequenceNumber)
+ if msgList == nil {
+ logger.Warningf("Replica %d could not assign sequence numbers: %+v",
+ instance.id, instance.viewChangeStore)
+ return instance.sendViewChange()
+ }
+
+ // Our independently computed Xset must match the primary's.
+ if !(len(msgList) == 0 && len(nv.Xset) == 0) && !reflect.DeepEqual(msgList, nv.Xset) {
+ logger.Warningf("Replica %d failed to verify new-view Xset: computed %+v, received %+v",
+ instance.id, msgList, nv.Xset)
+ return instance.sendViewChange()
+ }
+
+ if instance.h < cp.SequenceNumber {
+ instance.moveWatermarks(cp.SequenceNumber)
+ }
+
+ // Cannot execute to the checkpoint: fall back to state transfer.
+ if speculativeLastExec < cp.SequenceNumber {
+ logger.Warningf("Replica %d missing base checkpoint %d (%s), our most recent execution %d", instance.id, cp.SequenceNumber, cp.Id, speculativeLastExec)
+
+ snapshotID, err := base64.StdEncoding.DecodeString(cp.Id)
+ if nil != err {
+ err = fmt.Errorf("Replica %d received a view change whose hash could not be decoded (%s)", instance.id, cp.Id)
+ logger.Error(err.Error())
+ return nil
+ }
+
+ target := &stateUpdateTarget{
+ checkpointMessage: checkpointMessage{
+ seqNo: cp.SequenceNumber,
+ id: snapshotID,
+ },
+ replicas: replicas,
+ }
+
+ instance.updateHighStateTarget(target)
+ instance.stateTransfer(target)
+ }
+
+ // Request any assigned, non-checkpointed batches we do not hold locally.
+ for n, d := range nv.Xset {
+ // PBFT: why should we use "h ≥ min{n | ∃d : ( ∈ X)}"?
+ // "h ≥ min{n | ∃d : ( ∈ X)} ∧ ∀ ∈ X : (n ≤ h ∨ ∃m ∈ in : (D(m) = d))"
+ if n <= instance.h {
+ continue
+ } else {
+ if d == "" {
+ // NULL request; skip
+ continue
+ }
+
+ if _, ok := instance.reqBatchStore[d]; !ok {
+ logger.Warningf("Replica %d missing assigned, non-checkpointed request batch %s",
+ instance.id, d)
+ if _, ok := instance.missingReqBatches[d]; !ok {
+ logger.Warningf("Replica %v requesting to fetch batch %s",
+ instance.id, d)
+ newReqBatchMissing = true
+ instance.missingReqBatches[d] = true
+ }
+ }
+ }
+ }
+
+ if len(instance.missingReqBatches) == 0 {
+ return instance.processNewView2(nv)
+ } else if newReqBatchMissing {
+ instance.fetchRequestBatches()
+ }
+
+ return nil
+}
+
// processNewView2 completes adoption of a new view once every request batch
// referenced by the new-view message is locally available.  It installs the
// Xset entries as pre-prepares for the new view, advances the replica's
// sequence number, and (for backups) prepares and broadcasts each carried-over
// batch; the new primary instead resubmits outstanding requests.
func (instance *pbftCore) processNewView2(nv *NewView) events.Event {
	logger.Infof("Replica %d accepting new-view to view %d", instance.id, instance.view)

	// The view change is complete: cancel the view-change and null-request timers.
	instance.stopTimer()
	instance.nullRequestTimer.Stop()

	instance.activeView = true
	delete(instance.newViewStore, instance.view-1)

	// Replay the Xset as pre-prepares; seqNo is advanced below to the highest
	// assigned sequence number.
	instance.seqNo = instance.h
	for n, d := range nv.Xset {
		if n <= instance.h {
			// Covered by the stable checkpoint; nothing to replay.
			continue
		}

		reqBatch, ok := instance.reqBatchStore[d]
		if !ok && d != "" {
			// processNewView only dispatches here after all missing batches
			// were fetched, so an absent non-null batch indicates a bug.
			logger.Criticalf("Replica %d is missing request batch for seqNo=%d with digest '%s' for assigned prepare after fetching, this indicates a serious bug", instance.id, n, d)
		}
		preprep := &PrePrepare{
			View:           instance.view,
			SequenceNumber: n,
			BatchDigest:    d,
			RequestBatch:   reqBatch,
			ReplicaId:      instance.id,
		}
		cert := instance.getCert(instance.view, n)
		cert.prePrepare = preprep
		cert.digest = d
		if n > instance.seqNo {
			instance.seqNo = n
		}
		instance.persistQSet()
	}

	instance.updateViewChangeSeqNo()

	if instance.primary(instance.view) != instance.id {
		// Backup replica: prepare (and broadcast) each carried-over batch.
		for n, d := range nv.Xset {
			prep := &Prepare{
				View:           instance.view,
				SequenceNumber: n,
				BatchDigest:    d,
				ReplicaId:      instance.id,
			}
			if n > instance.h {
				cert := instance.getCert(instance.view, n)
				cert.sentPrepare = true
				instance.recvPrepare(prep)
			}
			instance.innerBroadcast(&Message{Payload: &Message_Prepare{Prepare: prep}})
		}
	} else {
		// New primary: outstanding request batches must be re-proposed.
		logger.Debugf("Replica %d is now primary, attempting to resubmit requests", instance.id)
		instance.resubmitRequestBatches()
	}

	instance.startTimerIfOutstandingRequests()

	logger.Debugf("Replica %d done cleaning view change artifacts, calling into consumer", instance.id)

	return viewChangedEvent{}
}
+
+func (instance *pbftCore) getViewChanges() (vset []*ViewChange) {
+ for _, vc := range instance.viewChangeStore {
+ vset = append(vset, vc)
+ }
+
+ return
+}
+
+func (instance *pbftCore) selectInitialCheckpoint(vset []*ViewChange) (checkpoint ViewChange_C, ok bool, replicas []uint64) {
+ checkpoints := make(map[ViewChange_C][]*ViewChange)
+ for _, vc := range vset {
+ for _, c := range vc.Cset { // TODO, verify that we strip duplicate checkpoints from this set
+ checkpoints[*c] = append(checkpoints[*c], vc)
+ logger.Debugf("Replica %d appending checkpoint from replica %d with seqNo=%d, h=%d, and checkpoint digest %s", instance.id, vc.ReplicaId, vc.H, c.SequenceNumber, c.Id)
+ }
+ }
+
+ if len(checkpoints) == 0 {
+ logger.Debugf("Replica %d has no checkpoints to select from: %d %s",
+ instance.id, len(instance.viewChangeStore), checkpoints)
+ return
+ }
+
+ for idx, vcList := range checkpoints {
+ // need weak certificate for the checkpoint
+ if len(vcList) <= instance.f { // type casting necessary to match types
+ logger.Debugf("Replica %d has no weak certificate for n:%d, vcList was %d long",
+ instance.id, idx.SequenceNumber, len(vcList))
+ continue
+ }
+
+ quorum := 0
+ // Note, this is the whole vset (S) in the paper, not just this checkpoint set (S') (vcList)
+ // We need 2f+1 low watermarks from S below this seqNo from all replicas
+ // We need f+1 matching checkpoints at this seqNo (S')
+ for _, vc := range vset {
+ if vc.H <= idx.SequenceNumber {
+ quorum++
+ }
+ }
+
+ if quorum < instance.intersectionQuorum() {
+ logger.Debugf("Replica %d has no quorum for n:%d", instance.id, idx.SequenceNumber)
+ continue
+ }
+
+ replicas = make([]uint64, len(vcList))
+ for i, vc := range vcList {
+ replicas[i] = vc.ReplicaId
+ }
+
+ if checkpoint.SequenceNumber <= idx.SequenceNumber {
+ checkpoint = idx
+ ok = true
+ }
+ }
+
+ return
+}
+
// assignSequenceNumbers computes the Xset for a new view per the PBFT
// view-change protocol: for each sequence number n in (h, h+L] it either
// selects the digest that a quorum proves was prepared (conditions A1/A2),
// selects the null request when a quorum proves nothing was prepared for n,
// or fails (returns nil) when neither can be established.
// vset is the set S of view-change messages; h is the initial checkpoint's
// sequence number chosen by selectInitialCheckpoint.
func (instance *pbftCore) assignSequenceNumbers(vset []*ViewChange, h uint64) (msgList map[uint64]string) {
	msgList = make(map[uint64]string)

	// Highest n assigned a non-null request; null assignments above this
	// carry no information and are pruned at the end.
	maxN := h + 1

	// "for all n such that h < n <= h + L"
nLoop:
	for n := h + 1; n <= h+instance.L; n++ {
		// "∃m ∈ S..."
		for _, m := range vset {
			// "...with a prepared entry <n,d,v> ∈ m.P"
			for _, em := range m.Pset {
				quorum := 0
				// "A1. ∃2f+1 messages m' ∈ S" that do not contradict <n,d,v>
			mpLoop:
				for _, mp := range vset {
					// Only messages whose watermark is below n count.
					if mp.H >= n {
						continue
					}
					// "∀ entries <n,d',v'> ∈ m'.P": contradiction unless the
					// entry is from an older view, or matches view and digest.
					for _, emp := range mp.Pset {
						if n == emp.SequenceNumber && !(emp.View < em.View || (emp.View == em.View && emp.BatchDigest == em.BatchDigest)) {
							continue mpLoop
						}
					}
					quorum++
				}

				if quorum < instance.intersectionQuorum() {
					continue
				}

				quorum = 0
				// "A2. ∃f+1 messages m' ∈ S" pre-prepared with this digest
				// in view >= v
				for _, mp := range vset {
					// "∃ entry <n,d,v'> ∈ m'.Q with v' >= v"
					for _, emp := range mp.Qset {
						if n == emp.SequenceNumber && emp.View >= em.View && emp.BatchDigest == em.BatchDigest {
							quorum++
						}
					}
				}

				if quorum < instance.f+1 {
					continue
				}

				// "then select the request with digest d for number n"
				msgList[n] = em.BatchDigest
				maxN = n

				continue nLoop
			}
		}

		quorum := 0
		// "else if ∃2f+1 messages m ∈ S" with no prepared entry for n
	nullLoop:
		for _, m := range vset {
			// "m.P has no entry" for this n
			for _, em := range m.Pset {
				if em.SequenceNumber == n {
					continue nullLoop
				}
			}
			quorum++
		}

		if quorum >= instance.intersectionQuorum() {
			// "then select the null request for number n"
			msgList[n] = ""

			continue nLoop
		}

		// Neither condition held: the Xset cannot be determined yet.
		logger.Warningf("Replica %d could not assign value to contents of seqNo %d, found only %d missing P entries", instance.id, n, quorum)
		return nil
	}

	// prune top null requests: trailing nulls above the highest real request
	// are dropped.
	for n, msg := range msgList {
		if n > maxN && msg == "" {
			delete(msgList, n)
		}
	}

	return
}
diff --git a/consensus/util/events/events.go b/consensus/util/events/events.go
new file mode 100644
index 00000000000..87ea157772f
--- /dev/null
+++ b/consensus/util/events/events.go
@@ -0,0 +1,278 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
package events

import (
	"time"

	"github.com/op/go-logging"
)

var logger *logging.Logger // package-level logger

// init binds the package-level logger to the "consensus/util/events" module.
func init() {
	logger = logging.MustGetLogger("consensus/util/events")
}

// Event is a type meant to clearly convey that the return type or parameter to a function will be supplied to/from an events.Manager
type Event interface{}

// Receiver is a consumer of events, ProcessEvent will be called serially
// as events arrive
type Receiver interface {
	// ProcessEvent delivers an event to the Receiver, if it returns non-nil, the return is the next processed event
	ProcessEvent(e Event) Event
}
+
+// ------------------------------------------------------------
+//
+// Threaded object
+//
+// ------------------------------------------------------------
+
// threaded holds an exit channel to allow threads to break from a select
type threaded struct {
	exit chan struct{} // closed exactly once by Halt to signal shutdown
}

// Halt tells the threaded object's thread to exit.  A second call finds the
// channel already closed and only logs a warning instead of panicking on a
// double close.
// NOTE(review): two truly concurrent Halt calls could both take the default
// branch and double-close the channel; callers appear to Halt from a single
// goroutine — confirm.
func (t *threaded) Halt() {
	select {
	case <-t.exit:
		logger.Warning("Attempted to halt a threaded object twice")
	default:
		close(t.exit)
	}
}
+
+// ------------------------------------------------------------
+//
+// Event Manager
+//
+// ------------------------------------------------------------
+
// Manager provides a serialized interface for submitting events to
// a Receiver on the other side of the queue
type Manager interface {
	Inject(Event)         // A temporary interface to allow the event manager thread to skip the queue
	Queue() chan<- Event  // Get a write-only reference to the queue, to submit events
	SetReceiver(Receiver) // Set the target to route events to
	Start()               // Starts the Manager thread TODO, these thread management things should probably go away
	Halt()                // Stops the Manager thread
}

// managerImpl is an implementation of Manager
type managerImpl struct {
	threaded            // supplies the exit channel used by Halt
	receiver Receiver   // destination for dequeued events
	events   chan Event // the serialization queue
}

// NewManagerImpl creates an instance of managerImpl with an unbuffered
// event queue; the caller must invoke Start before events are consumed.
func NewManagerImpl() Manager {
	return &managerImpl{
		events:   make(chan Event),
		threaded: threaded{make(chan struct{})},
	}
}
+
+// SetReceiver sets the destination for events
+func (em *managerImpl) SetReceiver(receiver Receiver) {
+ em.receiver = receiver
+}
+
+// Start creates the go routine necessary to deliver events
+func (em *managerImpl) Start() {
+ go em.eventLoop()
+}
+
+// queue returns a write only reference to the event queue
+func (em *managerImpl) Queue() chan<- Event {
+ return em.events
+}
+
+// SendEvent performs the event loop on a receiver to completion
+func SendEvent(receiver Receiver, event Event) {
+ next := event
+ for {
+ // If an event returns something non-nil, then process it as a new event
+ next = receiver.ProcessEvent(next)
+ if next == nil {
+ break
+ }
+ }
+}
+
+// Inject can only safely be called by the managerImpl thread itself, it skips the queue
+func (em *managerImpl) Inject(event Event) {
+ if em.receiver != nil {
+ SendEvent(em.receiver, event)
+ }
+}
+
// eventLoop is where the event thread loops, delivering each queued event to
// the receiver (via Inject) until the exit channel is closed by Halt.
func (em *managerImpl) eventLoop() {
	for {
		select {
		case next := <-em.events:
			em.Inject(next)
		case <-em.exit:
			logger.Debug("eventLoop told to exit")
			return
		}
	}
}
+
+// ------------------------------------------------------------
+//
+// Event Timer
+//
+// ------------------------------------------------------------
+
// Timer is an interface for managing time driven events
// the special contract Timer gives which a traditional golang
// timer does not, is that if the event thread calls stop, or reset
// then even if the timer has already fired, the event will not be
// delivered to the event queue
type Timer interface {
	SoftReset(duration time.Duration, event Event) // start a new countdown, only if one is not already started
	Reset(duration time.Duration, event Event)     // start a new countdown, clear any pending events
	Stop()                                         // stop the countdown, clear any pending events
	Halt()                                         // Stops the Timer thread
}

// TimerFactory abstracts the creation of Timers, as they may
// need to be mocked for testing
type TimerFactory interface {
	CreateTimer() Timer // Creates a Timer which is stopped
}

// timerFactoryImpl implements the TimerFactory
type timerFactoryImpl struct {
	manager Manager // The Manager to use in constructing the event timers
}

// NewTimerFactoryImpl creates a new TimerFactory for the given Manager
func NewTimerFactoryImpl(manager Manager) TimerFactory {
	return &timerFactoryImpl{manager}
}

// CreateTimer creates a new timer which delivers events to the Manager for this factory
func (etf *timerFactoryImpl) CreateTimer() Timer {
	return newTimerImpl(etf.manager)
}
+
+// timerStart is used to deliver the start request to the eventTimer thread
+type timerStart struct {
+ hard bool // Whether to reset the timer if it is running
+ event Event // What event to push onto the event queue
+ duration time.Duration // How long to wait before sending the event
+}
+
+// timerImpl is an implementation of Timer
+type timerImpl struct {
+ threaded // Gives us the exit chan
+ timerChan <-chan time.Time // When non-nil, counts down to preparing to do the event
+ startChan chan *timerStart // Channel to deliver the timer start events to the service go routine
+ stopChan chan struct{} // Channel to deliver the timer stop events to the service go routine
+ manager Manager // The event manager to deliver the event to after timer expiration
+}
+
+// newTimer creates a new instance of timerImpl
+func newTimerImpl(manager Manager) Timer {
+ et := &timerImpl{
+ startChan: make(chan *timerStart),
+ stopChan: make(chan struct{}),
+ threaded: threaded{make(chan struct{})},
+ manager: manager,
+ }
+ go et.loop()
+ return et
+}
+
+// softReset tells the timer to start a new countdown, only if it is not currently counting down
+// this will not clear any pending events
+func (et *timerImpl) SoftReset(timeout time.Duration, event Event) {
+ et.startChan <- &timerStart{
+ duration: timeout,
+ event: event,
+ hard: false,
+ }
+}
+
+// reset tells the timer to start counting down from a new timeout, this also clears any pending events
+func (et *timerImpl) Reset(timeout time.Duration, event Event) {
+ et.startChan <- &timerStart{
+ duration: timeout,
+ event: event,
+ hard: true,
+ }
+}
+
+// stop tells the timer to stop, and not to deliver any pending events
+func (et *timerImpl) Stop() {
+ et.stopChan <- struct{}{}
+}
+
// loop is where the timer thread lives, looping.
// It is a small state machine: timerChan is non-nil only while a countdown is
// running, and eventDestChan is non-nil only while an expired event awaits
// delivery.  Nil channels block forever in a select, which is what disables
// the inactive states.
func (et *timerImpl) loop() {
	var eventDestChan chan<- Event // destination queue while an expired event is pending delivery
	var event Event                // the pending event, if any

	for {
		// A little state machine, relying on the fact that nil channels will block on read/write indefinitely

		select {
		case start := <-et.startChan:
			if et.timerChan != nil {
				if start.hard {
					logger.Debug("Resetting a running timer")
				} else {
					// Soft reset while a countdown is running: ignore.
					continue
				}
			}
			logger.Debug("Starting timer")
			et.timerChan = time.After(start.duration)
			if eventDestChan != nil {
				logger.Debug("Timer cleared pending event")
			}
			// Any previously pending event is replaced and disarmed.
			event = start.event
			eventDestChan = nil
		case <-et.stopChan:
			if et.timerChan == nil && eventDestChan == nil {
				logger.Debug("Attempting to stop an unfired idle timer")
			}
			et.timerChan = nil
			logger.Debug("Stopping timer")
			if eventDestChan != nil {
				logger.Debug("Timer cleared pending event")
			}
			eventDestChan = nil
			event = nil
		case <-et.timerChan:
			// Countdown expired: arm delivery of the stored event.
			logger.Debug("Event timer fired")
			et.timerChan = nil
			eventDestChan = et.manager.Queue()
		case eventDestChan <- event:
			logger.Debug("Timer event delivered")
			eventDestChan = nil
		case <-et.exit:
			logger.Debug("Halting timer")
			return
		}
	}
}
diff --git a/consensus/util/events/events_test.go b/consensus/util/events/events_test.go
new file mode 100644
index 00000000000..103bbfa9d76
--- /dev/null
+++ b/consensus/util/events/events_test.go
@@ -0,0 +1,172 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package events
+
+import (
+ "testing"
+ "time"
+)
+
+type mockEvent struct {
+ info string
+}
+
+type mockReceiver struct {
+ processEventImpl func(event Event) Event
+}
+
+func (mr *mockReceiver) ProcessEvent(event Event) Event {
+ if mr.processEventImpl != nil {
+ return mr.processEventImpl(event)
+ }
+ return nil
+}
+
+func newMockManager(processEvent func(event Event) Event) Manager {
+ manager := NewManagerImpl()
+ manager.SetReceiver(&mockReceiver{
+ processEventImpl: processEvent,
+ })
+ return manager
+}
+
// Starts an event timer, waits for the event to be delivered
func TestEventTimerStart(t *testing.T) {
	events := make(chan Event)
	mr := newMockManager(func(event Event) Event {
		events <- event
		return nil
	})
	mr.Start()
	defer mr.Halt()
	timer := newTimerImpl(mr)
	defer timer.Halt()
	me := &mockEvent{}
	timer.Reset(time.Millisecond, me)

	select {
	case e := <-events:
		// The delivered event must be the exact value handed to Reset.
		if e != me {
			t.Fatalf("Received wrong output from event timer")
		}
	case <-time.After(time.Second):
		t.Fatalf("Timed out waiting for event to fire")
	}
}

// Starts an event timer, resets it twice, expects second output
func TestEventTimerHardReset(t *testing.T) {
	events := make(chan Event)
	mr := newMockManager(func(event Event) Event {
		events <- event
		return nil
	})
	timer := newTimerImpl(mr)
	defer timer.Halt()
	me1 := &mockEvent{"one"}
	me2 := &mockEvent{"two"}
	// A hard Reset replaces any running countdown or pending event, so me2
	// must be the one delivered once the manager starts.
	timer.Reset(time.Millisecond, me1)
	timer.Reset(time.Millisecond, me2)

	mr.Start()
	defer mr.Halt()

	select {
	case e := <-events:
		if e != me2 {
			t.Fatalf("Received wrong output (%v) from event timer", e)
		}
	case <-time.After(time.Second):
		t.Fatalf("Timed out waiting for event to fire")
	}
}

// Starts an event timer, soft resets it twice, expects first output
func TestEventTimerSoftReset(t *testing.T) {
	events := make(chan Event)
	mr := newMockManager(func(event Event) Event {
		events <- event
		return nil
	})
	timer := newTimerImpl(mr)
	defer timer.Halt()
	me1 := &mockEvent{"one"}
	me2 := &mockEvent{"two"}
	// The second SoftReset should be ignored while me1's countdown runs.
	// NOTE(review): if me1's 1ms countdown expires between the two calls, the
	// second SoftReset is honored and me2 is delivered — potential flake.
	timer.SoftReset(time.Millisecond, me1)
	timer.SoftReset(time.Millisecond, me2)

	mr.Start()
	defer mr.Halt()

	select {
	case e := <-events:
		if e != me1 {
			t.Fatalf("Received wrong output (%v) from event timer", e)
		}
	case <-time.After(time.Second):
		t.Fatalf("Timed out waiting for event to fire")
	}
}

// Starts an event timer, then stops it before delivery is possible, should not receive event
func TestEventTimerStop(t *testing.T) {
	events := make(chan Event)
	mr := newMockManager(func(event Event) Event {
		events <- event
		return nil
	})
	timer := newTimerImpl(mr)
	defer timer.Halt()
	me := &mockEvent{}
	timer.Reset(time.Millisecond, me)
	time.Sleep(100 * time.Millisecond) // Allow the timer to fire
	// Stop after expiry but before the manager starts: the pending event must
	// be discarded — the special contract Timer adds over a raw time.Timer.
	timer.Stop()

	mr.Start()
	defer mr.Halt()

	select {
	case <-events:
		t.Fatalf("Received event output from event timer")
	case <-time.After(100 * time.Millisecond):
		// All good
	}
}

// Replies to an event with a different event, should process both
func TestEventManagerLoop(t *testing.T) {
	success := make(chan struct{})
	m2 := &mockEvent{}
	mr := newMockManager(func(event Event) Event {
		// The first delivery returns m2, which the manager must feed back in
		// as the next event.
		if event != m2 {
			return m2
		}
		success <- struct{}{}
		return nil
	})
	mr.Start()
	defer mr.Halt()

	mr.Queue() <- &mockEvent{}

	select {
	case <-success:
		// All good
	case <-time.After(2 * time.Second):
		t.Fatalf("Did not succeed processing second event")
	}
}
diff --git a/consensus/util/messagefan.go b/consensus/util/messagefan.go
new file mode 100644
index 00000000000..4d4a66edcd0
--- /dev/null
+++ b/consensus/util/messagefan.go
@@ -0,0 +1,84 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "sync"
+
+ "github.com/op/go-logging"
+
+ pb "github.com/hyperledger/fabric/protos"
+)
+
var logger *logging.Logger // package-level logger

// init binds the package-level logger to the "consensus/util" module.
func init() {
	logger = logging.MustGetLogger("consensus/util")
}

// Message encapsulates an OpenchainMessage with sender information
type Message struct {
	Msg    *pb.Message
	Sender *pb.PeerID
}

// MessageFan fans multiple registered per-peer inbound channels into a single
// shared output channel.
type MessageFan struct {
	ins  map[*pb.PeerID]<-chan *Message // registered inbound channels, keyed by sender
	out  chan *Message                  // the merged output stream
	lock sync.Mutex                     // guards ins
}

// NewMessageFan will return an initialized MessageFan
func NewMessageFan() *MessageFan {
	return &MessageFan{
		ins: make(map[*pb.PeerID]<-chan *Message),
		out: make(chan *Message),
	}
}
+
+// RegisterChannel is intended to be invoked by Handler to add a channel to be fan-ed in
+func (fan *MessageFan) RegisterChannel(sender *pb.PeerID, channel <-chan *Message) {
+ fan.lock.Lock()
+ defer fan.lock.Unlock()
+
+ if _, ok := fan.ins[sender]; ok {
+ logger.Warningf("Received duplicate connection from %v, switching to new connection", sender)
+ } else {
+ logger.Infof("Registering connection from %v", sender)
+ }
+
+ fan.ins[sender] = channel
+
+ go func() {
+ for msg := range channel {
+ fan.out <- msg
+ }
+
+ logger.Infof("Connection from peer %v terminated", sender)
+
+ fan.lock.Lock()
+ defer fan.lock.Unlock()
+
+ delete(fan.ins, sender)
+ }()
+}
+
// GetOutChannel returns a read only channel which the registered channels fan into
func (fan *MessageFan) GetOutChannel() <-chan *Message {
	return fan.out
}
diff --git a/consensus/util/messagefan_test.go b/consensus/util/messagefan_test.go
new file mode 100644
index 00000000000..b30d983cc75
--- /dev/null
+++ b/consensus/util/messagefan_test.go
@@ -0,0 +1,82 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ pb "github.com/hyperledger/fabric/protos"
+)
+
// TestFanIn registers many buffered channels, pushes messages concurrently
// through all of them, and verifies the fan emits exactly Channels*Messages
// messages — no loss, no duplication.
func TestFanIn(t *testing.T) {
	Channels := 500
	Messages := 100

	fh := NewMessageFan()

	for i := 0; i < Channels; i++ {
		// Half-sized buffer lets senders run ahead of the reader without
		// blocking immediately.
		c := make(chan *Message, Messages/2)
		pid := &pb.PeerID{Name: fmt.Sprintf("%d", i)}
		fh.RegisterChannel(pid, c)
		go func() {
			for j := 0; j < Messages; j++ {
				c <- &Message{}
			}
		}()
	}

	r := fh.GetOutChannel()

	count := 0
	for {
		select {
		case <-r:
		case <-time.After(time.Second):
			t.Fatalf("Timed out waiting to read %d messages from channel, at message %d", Channels*Messages, count)
		}
		count++
		if count == Channels*Messages {
			break
		}
	}

	// No extra message should be pending once the expected total is drained.
	select {
	case <-r:
		t.Fatalf("Read more than %d messages from channel", Channels*Messages)
	default:
	}

}
+
+func TestFanChannelClose(t *testing.T) {
+ fh := NewMessageFan()
+ c := make(chan *Message)
+ pid := &pb.PeerID{Name: "1"}
+ fh.RegisterChannel(pid, c)
+ close(c)
+
+ for i := 0; i < 100; i++ {
+ if len(fh.ins) == 0 {
+ return
+ }
+ time.Sleep(time.Millisecond)
+ }
+
+ t.Fatalf("Channel was not cleaned up")
+}
diff --git a/core/admin.go b/core/admin.go
new file mode 100644
index 00000000000..7221d2e4af4
--- /dev/null
+++ b/core/admin.go
@@ -0,0 +1,81 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+ "os"
+ "runtime"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+ "golang.org/x/net/context"
+
+ "google/protobuf"
+
+ pb "github.com/hyperledger/fabric/protos"
+)
+
var log = logging.MustGetLogger("server")

// NewAdminServer creates and returns an Admin service instance.
func NewAdminServer() *ServerAdmin {
	s := new(ServerAdmin)
	return s
}

// ServerAdmin implementation of the Admin service for the Peer
type ServerAdmin struct {
}
+
+func worker(id int, die chan struct{}) {
+ for {
+ select {
+ case <-die:
+ log.Debugf("worker %d terminating", id)
+ return
+ default:
+ log.Debugf("%d is working...", id)
+ runtime.Gosched()
+ }
+ }
+}
+
// GetStatus reports the status of the server
func (*ServerAdmin) GetStatus(context.Context, *google_protobuf.Empty) (*pb.ServerStatus, error) {
	status := &pb.ServerStatus{Status: pb.ServerStatus_STARTED}
	log.Debugf("returning status: %s", status)
	return status, nil
}

// StartServer starts the server
// NOTE(review): this only reports STARTED and performs no startup work of its
// own — confirm that is intentional.
func (*ServerAdmin) StartServer(context.Context, *google_protobuf.Empty) (*pb.ServerStatus, error) {
	status := &pb.ServerStatus{Status: pb.ServerStatus_STARTED}
	log.Debugf("returning status: %s", status)
	return status, nil
}

// StopServer stops the server: it removes the peer's pid file and terminates
// the process when this handler returns.
func (*ServerAdmin) StopServer(context.Context, *google_protobuf.Empty) (*pb.ServerStatus, error) {
	status := &pb.ServerStatus{Status: pb.ServerStatus_STOPPED}
	log.Debugf("returning status: %s", status)

	pidFile := viper.GetString("peer.fileSystemPath") + "/peer.pid"
	log.Debugf("Remove pid file %s", pidFile)
	// Remove error is ignored — presumably the file may legitimately be absent.
	os.Remove(pidFile)
	// NOTE(review): the deferred os.Exit(0) kills the process as soon as this
	// function returns — verify the gRPC response still reaches the client.
	defer os.Exit(0)
	return status, nil
}
diff --git a/core/admin_test.go b/core/admin_test.go
new file mode 100644
index 00000000000..78287e9d094
--- /dev/null
+++ b/core/admin_test.go
@@ -0,0 +1,24 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
package core

import "testing"

// TestServer_Status is a placeholder for exercising the admin status API;
// skipped until a handshake/test fixture is available.
func TestServer_Status(t *testing.T) {
	t.Skip("TBD")
	//performHandshake(t, peerClientConn)
}
diff --git a/core/chaincode/chaincode_support.go b/core/chaincode/chaincode_support.go
new file mode 100644
index 00000000000..e1c303e947a
--- /dev/null
+++ b/core/chaincode/chaincode_support.go
@@ -0,0 +1,666 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chaincode
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/spf13/viper"
+ "golang.org/x/net/context"
+
+ "strings"
+
+ "github.com/hyperledger/fabric/core/container"
+ "github.com/hyperledger/fabric/core/container/ccintf"
+ "github.com/hyperledger/fabric/core/crypto"
+ "github.com/hyperledger/fabric/core/ledger"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// ChainName is the name of the chain to which this chaincode support belongs to.
+type ChainName string
+
+const (
+	// DefaultChain is the name of the default chain.
+	DefaultChain ChainName = "default"
+	// DevModeUserRunsChaincode property allows user to run chaincode in development environment
+	DevModeUserRunsChaincode string = "dev"
+	chaincodeStartupTimeoutDefault int = 5000 // milliseconds; fallback when chaincode.startuptimeout is unset
+	chaincodeInstallPathDefault string = "/opt/gopath/bin/" // fallback when chaincode.installpath is unset
+	peerAddressDefault string = "0.0.0.0:30303" // fallback when no peer address can be determined
+)
+
+// chains is a map between different blockchains and their ChaincodeSupport.
+//this needs to be a first class, top-level object... for now, lets just have a placeholder
+var chains map[ChainName]*ChaincodeSupport
+
+func init() {
+	chains = make(map[ChainName]*ChaincodeSupport) // eager init so GetChain/NewChaincodeSupport never see a nil map
+}
+
+//chaincode runtime environment encapsulates handler and container environment
+//This is where the VM that's running the chaincode would hook in
+type chaincodeRTEnv struct {
+	handler *Handler // stream handler for the running (or launching) chaincode
+}
+
+// runningChaincodes contains maps of chaincodeIDs to their chaincodeRTEs
+type runningChaincodes struct {
+	sync.RWMutex // guards chaincodeMap
+	// chaincode environment for each chaincode
+	chaincodeMap map[string]*chaincodeRTEnv
+}
+
+// GetChain returns the chaincode support for a given chain
+func GetChain(name ChainName) *ChaincodeSupport {
+	return chains[name] // nil if no ChaincodeSupport was registered under this name
+}
+
+//call this under lock
+func (chaincodeSupport *ChaincodeSupport) preLaunchSetup(chaincode string) chan bool {
+	//register placeholder Handler. This will be transferred in registerHandler
+	//NOTE: from this point, existence of handler for this chaincode means the chaincode
+	//is in the process of getting started (or has been started)
+	notfy := make(chan bool, 1) // buffered so the registering side never blocks on notify
+	chaincodeSupport.runningChaincodes.chaincodeMap[chaincode] = &chaincodeRTEnv{handler: &Handler{readyNotify: notfy}}
+	return notfy
+}
+
+//call this under lock
+func (chaincodeSupport *ChaincodeSupport) chaincodeHasBeenLaunched(chaincode string) (*chaincodeRTEnv, bool) {
+	rtenv, launched := chaincodeSupport.runningChaincodes.chaincodeMap[chaincode]
+	return rtenv, launched
+}
+
+// NewChaincodeSupport creates a new ChaincodeSupport instance
+func NewChaincodeSupport(chainname ChainName, getPeerEndpoint func() (*pb.PeerEndpoint, error), userrunsCC bool, ccstartuptimeout time.Duration, secHelper crypto.Peer) *ChaincodeSupport {
+	pnid := viper.GetString("peer.networkId")
+	pid := viper.GetString("peer.id")
+
+	s := &ChaincodeSupport{name: chainname, runningChaincodes: &runningChaincodes{chaincodeMap: make(map[string]*chaincodeRTEnv)}, secHelper: secHelper, peerNetworkID: pnid, peerID: pid}
+
+	//initialize global chain
+	chains[chainname] = s
+
+	peerEndpoint, err := getPeerEndpoint()
+	if err != nil {
+		chaincodeLogger.Errorf("Error getting PeerEndpoint, using peer.address: %s", err)
+		s.peerAddress = viper.GetString("peer.address")
+	} else {
+		s.peerAddress = peerEndpoint.Address
+	}
+	chaincodeLogger.Infof("Chaincode support using peerAddress: %s\n", s.peerAddress)
+	//peerAddress = viper.GetString("peer.address")
+	if s.peerAddress == "" {
+		s.peerAddress = peerAddressDefault
+	}
+
+	s.userRunsCC = userrunsCC
+
+	s.ccStartupTimeout = ccstartuptimeout
+
+	//TODO I'm not sure if this needs to be on a per chain basis... too low-level and just needs to be a global default ?
+	s.chaincodeInstallPath = viper.GetString("chaincode.installpath")
+	if s.chaincodeInstallPath == "" {
+		s.chaincodeInstallPath = chaincodeInstallPathDefault
+	}
+
+	s.peerTLS = viper.GetBool("peer.tls.enabled")
+	if s.peerTLS { // TLS material is only read when TLS is enabled
+		s.peerTLSCertFile = viper.GetString("peer.tls.cert.file")
+		s.peerTLSKeyFile = viper.GetString("peer.tls.key.file")
+		s.peerTLSSvrHostOrd = viper.GetString("peer.tls.serverhostoverride")
+	}
+
+	kadef := 0 // default: keepalive disabled
+	if ka := viper.GetString("chaincode.keepalive"); ka == "" {
+		s.keepalive = time.Duration(kadef) * time.Second
+	} else {
+		t, terr := strconv.Atoi(ka)
+		if terr != nil {
+			chaincodeLogger.Errorf("Invalid keepalive value %s (%s) defaulting to %d", ka, terr, kadef)
+			t = kadef
+		} else if t <= 0 {
+			chaincodeLogger.Debugf("Turn off keepalive(value %s)", ka)
+			t = kadef
+		}
+		s.keepalive = time.Duration(t) * time.Second
+	}
+
+	return s
+}
+
+// // ChaincodeStream standard stream for ChaincodeMessage type.
+// type ChaincodeStream interface {
+// Send(*pb.ChaincodeMessage) error
+// Recv() (*pb.ChaincodeMessage, error)
+// }
+
+// ChaincodeSupport responsible for providing interfacing with chaincodes from the Peer.
+type ChaincodeSupport struct {
+	name ChainName // chain this support instance belongs to
+	runningChaincodes *runningChaincodes // registry of launched/launching chaincodes
+	peerAddress string // address handed to chaincodes for callbacks
+	ccStartupTimeout time.Duration // how long to wait for REGISTER/ready on launch
+	chaincodeInstallPath string // where chaincode executables live in the container
+	userRunsCC bool // dev mode: the user starts the chaincode process themselves
+	secHelper crypto.Peer // security helper; may be nil
+	peerNetworkID string
+	peerID string
+	peerTLS bool // whether TLS material below is populated
+	peerTLSCertFile string
+	peerTLSKeyFile string
+	peerTLSSvrHostOrd string
+	keepalive time.Duration // 0 disables keepalive
+}
+
+// DuplicateChaincodeHandlerError returned if attempt to register same chaincodeID while a stream already exists.
+type DuplicateChaincodeHandlerError struct {
+	ChaincodeID *pb.ChaincodeID // the ID that was registered twice
+}
+
+func (d *DuplicateChaincodeHandlerError) Error() string { // implements the error interface
+	return fmt.Sprintf("Duplicate chaincodeID error: %s", d.ChaincodeID)
+}
+
+func newDuplicateChaincodeHandlerError(chaincodeHandler *Handler) error { // convenience constructor from a handler
+	return &DuplicateChaincodeHandlerError{ChaincodeID: chaincodeHandler.ChaincodeID}
+}
+
+func (chaincodeSupport *ChaincodeSupport) registerHandler(chaincodehandler *Handler) error { // registers (or swaps in) the stream handler for a chaincode
+	key := chaincodehandler.ChaincodeID.Name
+
+	chaincodeSupport.runningChaincodes.Lock()
+	defer chaincodeSupport.runningChaincodes.Unlock()
+
+	chrte2, ok := chaincodeSupport.chaincodeHasBeenLaunched(key)
+	if ok && chrte2.handler.registered { // idiomatic: was "== true"
+		chaincodeLogger.Debugf("duplicate registered handler(key:%s) return error", key)
+		// Duplicate, return error
+		return newDuplicateChaincodeHandlerError(chaincodehandler)
+	}
+	//a placeholder, unregistered handler will be setup by query or transaction processing that comes
+	//through via consensus. In this case we swap the handler and give it the notify channel
+	if chrte2 != nil {
+		chaincodehandler.readyNotify = chrte2.handler.readyNotify
+		chrte2.handler = chaincodehandler
+	} else {
+		chaincodeSupport.runningChaincodes.chaincodeMap[key] = &chaincodeRTEnv{handler: chaincodehandler}
+	}
+
+	chaincodehandler.registered = true
+
+	//now we are ready to receive messages and send back responses
+	chaincodehandler.txCtxs = make(map[string]*transactionContext)
+	chaincodehandler.uuidMap = make(map[string]bool)
+	chaincodehandler.isTransaction = make(map[string]bool)
+
+	chaincodeLogger.Debugf("registered handler complete for chaincode %s", key)
+
+	return nil
+}
+
+func (chaincodeSupport *ChaincodeSupport) deregisterHandler(chaincodehandler *Handler) error { // removes a chaincode's handler and releases its resources
+
+	// clean up rangeQueryIteratorMap
+	for _, context := range chaincodehandler.txCtxs {
+		for _, v := range context.rangeQueryIteratorMap {
+			v.Close() // release ledger iterators held by in-flight transactions
+		}
+	}
+
+	key := chaincodehandler.ChaincodeID.Name
+	chaincodeLogger.Debugf("Deregister handler: %s", key)
+	chaincodeSupport.runningChaincodes.Lock()
+	defer chaincodeSupport.runningChaincodes.Unlock()
+	if _, ok := chaincodeSupport.chaincodeHasBeenLaunched(key); !ok {
+		// Handler NOT found
+		return fmt.Errorf("Error deregistering handler, could not find handler with key: %s", key)
+	}
+	delete(chaincodeSupport.runningChaincodes.chaincodeMap, key)
+	chaincodeLogger.Debugf("Deregistered handler with key: %s", key)
+	return nil
+}
+
+// Based on state of chaincode send either init or ready to move to ready state
+func (chaincodeSupport *ChaincodeSupport) sendInitOrReady(context context.Context, uuid string, chaincode string, f *string, initArgs []string, timeout time.Duration, tx *pb.Transaction, depTx *pb.Transaction) error {
+	chaincodeSupport.runningChaincodes.Lock()
+	//if its in the map, there must be a connected stream...nothing to do
+	var chrte *chaincodeRTEnv
+	var ok bool
+	if chrte, ok = chaincodeSupport.chaincodeHasBeenLaunched(chaincode); !ok {
+		chaincodeSupport.runningChaincodes.Unlock()
+		chaincodeLogger.Debugf("handler not found for chaincode %s", chaincode)
+		return fmt.Errorf("handler not found for chaincode %s", chaincode)
+	}
+	chaincodeSupport.runningChaincodes.Unlock() // release before the potentially long wait below
+
+	var notfy chan *pb.ChaincodeMessage
+	var err error
+	if notfy, err = chrte.handler.initOrReady(uuid, f, initArgs, tx, depTx); err != nil {
+		return fmt.Errorf("Error sending %s: %s", pb.ChaincodeMessage_INIT, err)
+	}
+	if notfy != nil { // nil channel means there is nothing to wait for
+		select {
+		case ccMsg := <-notfy:
+			if ccMsg.Type == pb.ChaincodeMessage_ERROR {
+				err = fmt.Errorf("Error initializing container %s: %s", chaincode, string(ccMsg.Payload))
+			}
+		case <-time.After(timeout):
+			err = fmt.Errorf("Timeout expired while executing send init message")
+		}
+	}
+
+	//if initOrReady succeeded, our responsibility to delete the context
+	chrte.handler.deleteTxContext(uuid)
+
+	return err
+}
+
+//get args and env given chaincodeID
+func (chaincodeSupport *ChaincodeSupport) getArgsAndEnv(cID *pb.ChaincodeID, cLang pb.ChaincodeSpec_Type) (args []string, envs []string, err error) {
+	envs = []string{"CORE_CHAINCODE_ID_NAME=" + cID.Name}
+	//if TLS is enabled, pass TLS material to chaincode
+	if chaincodeSupport.peerTLS {
+		envs = append(envs, "CORE_PEER_TLS_ENABLED=true")
+		envs = append(envs, "CORE_PEER_TLS_CERT_FILE="+chaincodeSupport.peerTLSCertFile) // NOTE(review): peerTLSKeyFile is never passed through - confirm intended
+		if chaincodeSupport.peerTLSSvrHostOrd != "" {
+			envs = append(envs, "CORE_PEER_TLS_SERVERHOSTOVERRIDE="+chaincodeSupport.peerTLSSvrHostOrd)
+		}
+	} else {
+		envs = append(envs, "CORE_PEER_TLS_ENABLED=false")
+	}
+	switch cLang {
+	case pb.ChaincodeSpec_GOLANG, pb.ChaincodeSpec_CAR:
+		//chaincode executable will be same as the name of the chaincode
+		args = []string{chaincodeSupport.chaincodeInstallPath + cID.Name, fmt.Sprintf("-peer.address=%s", chaincodeSupport.peerAddress)}
+		chaincodeLogger.Debugf("Executable is %s", args[0])
+	case pb.ChaincodeSpec_JAVA:
+		//TODO add security args
+		args = strings.Split(
+			fmt.Sprintf("/usr/bin/gradle run -p /root -PappArgs=[\"-a\",\"%s\",\"-i\",\"%s\"]"+
+				" -x processResources -x classes", chaincodeSupport.peerAddress, cID.Name),
+			" ")
+		chaincodeLogger.Debugf("Executable is gradle run on chaincode ID %s", cID.Name)
+	default:
+		return nil, nil, fmt.Errorf("Unknown chaincodeType: %s", cLang)
+	}
+	return args, envs, nil
+}
+
+// launchAndWaitForRegister will launch container if not already running. Use the targz to create the image if not found
+func (chaincodeSupport *ChaincodeSupport) launchAndWaitForRegister(ctxt context.Context, cds *pb.ChaincodeDeploymentSpec, cID *pb.ChaincodeID, uuid string, cLang pb.ChaincodeSpec_Type, targz io.Reader) (bool, error) {
+	chaincode := cID.Name
+	if chaincode == "" {
+		return false, fmt.Errorf("chaincode name not set")
+	}
+
+	chaincodeSupport.runningChaincodes.Lock()
+	var ok bool
+	//if its in the map, there must be a connected stream...nothing to do
+	if _, ok = chaincodeSupport.chaincodeHasBeenLaunched(chaincode); ok {
+		chaincodeLogger.Debugf("chaincode is running and ready: %s", chaincode)
+		chaincodeSupport.runningChaincodes.Unlock()
+		return true, nil
+	}
+	alreadyRunning := false
+
+	notfy := chaincodeSupport.preLaunchSetup(chaincode) // placeholder handler; launch result arrives on notfy
+	chaincodeSupport.runningChaincodes.Unlock()
+
+	//launch the chaincode
+
+	args, env, err := chaincodeSupport.getArgsAndEnv(cID, cLang)
+	if err != nil {
+		return alreadyRunning, err
+	}
+
+	chaincodeLogger.Debugf("start container: %s(networkid:%s,peerid:%s)", chaincode, chaincodeSupport.peerNetworkID, chaincodeSupport.peerID)
+
+	vmtype, _ := chaincodeSupport.getVMType(cds)
+
+	sir := container.StartImageReq{CCID: ccintf.CCID{ChaincodeSpec: cds.ChaincodeSpec, NetworkID: chaincodeSupport.peerNetworkID, PeerID: chaincodeSupport.peerID}, Reader: targz, Args: args, Env: env}
+
+	ipcCtxt := context.WithValue(ctxt, ccintf.GetCCHandlerKey(), chaincodeSupport) // exposes this ChaincodeSupport to in-process (system) chaincodes
+
+	resp, err := container.VMCProcess(ipcCtxt, vmtype, sir)
+	if err != nil || (resp != nil && resp.(container.VMCResp).Err != nil) {
+		if err == nil {
+			err = resp.(container.VMCResp).Err
+		}
+		err = fmt.Errorf("Error starting container: %s", err)
+		chaincodeSupport.runningChaincodes.Lock()
+		delete(chaincodeSupport.runningChaincodes.chaincodeMap, chaincode) // undo preLaunchSetup placeholder on failure
+		chaincodeSupport.runningChaincodes.Unlock()
+		return alreadyRunning, err
+	}
+
+	//wait for REGISTER state
+	select {
+	case ok := <-notfy:
+		if !ok {
+			err = fmt.Errorf("registration failed for %s(networkid:%s,peerid:%s,tx:%s)", chaincode, chaincodeSupport.peerNetworkID, chaincodeSupport.peerID, uuid)
+		}
+	case <-time.After(chaincodeSupport.ccStartupTimeout):
+		err = fmt.Errorf("Timeout expired while starting chaincode %s(networkid:%s,peerid:%s,tx:%s)", chaincode, chaincodeSupport.peerNetworkID, chaincodeSupport.peerID, uuid)
+	}
+	if err != nil {
+		chaincodeLogger.Debugf("stopping due to error while launching %s", err)
+		errIgnore := chaincodeSupport.Stop(ctxt, cds)
+		if errIgnore != nil {
+			chaincodeLogger.Debugf("error on stop %s(%s)", errIgnore, err)
+		}
+	}
+	return alreadyRunning, err
+}
+
+//Stop stops a chaincode if running
+func (chaincodeSupport *ChaincodeSupport) Stop(context context.Context, cds *pb.ChaincodeDeploymentSpec) error {
+	chaincode := cds.ChaincodeSpec.ChaincodeID.Name
+	if chaincode == "" {
+		return fmt.Errorf("chaincode name not set")
+	}
+
+	//stop the chaincode
+	sir := container.StopImageReq{CCID: ccintf.CCID{ChaincodeSpec: cds.ChaincodeSpec, NetworkID: chaincodeSupport.peerNetworkID, PeerID: chaincodeSupport.peerID}, Timeout: 0}
+
+	vmtype, _ := chaincodeSupport.getVMType(cds)
+
+	_, err := container.VMCProcess(context, vmtype, sir)
+	if err != nil {
+		err = fmt.Errorf("Error stopping container: %s", err)
+		//but proceed to cleanup
+	}
+
+	chaincodeSupport.runningChaincodes.Lock()
+	if _, ok := chaincodeSupport.chaincodeHasBeenLaunched(chaincode); !ok {
+		//nothing to do
+		chaincodeSupport.runningChaincodes.Unlock()
+		return nil
+	}
+
+	delete(chaincodeSupport.runningChaincodes.chaincodeMap, chaincode) // forget the handler even if the container stop failed
+
+	chaincodeSupport.runningChaincodes.Unlock()
+
+	return err
+}
+
+// Launch will launch the chaincode if not running (if running return nil) and will wait for handler of the chaincode to get into FSM ready state.
+func (chaincodeSupport *ChaincodeSupport) Launch(context context.Context, t *pb.Transaction) (*pb.ChaincodeID, *pb.ChaincodeInput, error) {
+	//build the chaincode
+	var cID *pb.ChaincodeID
+	var cMsg *pb.ChaincodeInput
+	var f *string
+	var cLang pb.ChaincodeSpec_Type
+	var initargs []string
+
+	cds := &pb.ChaincodeDeploymentSpec{}
+	if t.Type == pb.Transaction_CHAINCODE_DEPLOY {
+		err := proto.Unmarshal(t.Payload, cds)
+		if err != nil {
+			return nil, nil, err
+		}
+		cID = cds.ChaincodeSpec.ChaincodeID
+		cMsg = cds.ChaincodeSpec.CtorMsg
+		cLang = cds.ChaincodeSpec.Type
+		f = &cMsg.Function
+		initargs = cMsg.Args
+	} else if t.Type == pb.Transaction_CHAINCODE_INVOKE || t.Type == pb.Transaction_CHAINCODE_QUERY {
+		ci := &pb.ChaincodeInvocationSpec{}
+		err := proto.Unmarshal(t.Payload, ci)
+		if err != nil {
+			return nil, nil, err
+		}
+		cID = ci.ChaincodeSpec.ChaincodeID
+		cMsg = ci.ChaincodeSpec.CtorMsg
+	} else {
+		//fix: runningChaincodes is not locked yet (Lock happens below), so the previous Unlock() here would panic
+		return nil, nil, fmt.Errorf("invalid transaction type: %d", t.Type)
+	}
+	chaincode := cID.Name
+	chaincodeSupport.runningChaincodes.Lock()
+	var chrte *chaincodeRTEnv
+	var ok bool
+	var err error
+	//if its in the map, there must be a connected stream...nothing to do
+	if chrte, ok = chaincodeSupport.chaincodeHasBeenLaunched(chaincode); ok {
+		if !chrte.handler.registered {
+			chaincodeSupport.runningChaincodes.Unlock()
+			chaincodeLogger.Debugf("premature execution - chaincode (%s) is being launched", chaincode)
+			err = fmt.Errorf("premature execution - chaincode (%s) is being launched", chaincode)
+			return cID, cMsg, err
+		}
+		if chrte.handler.isRunning() {
+			chaincodeLogger.Debugf("chaincode is running(no need to launch) : %s", chaincode)
+			chaincodeSupport.runningChaincodes.Unlock()
+			return cID, cMsg, nil
+		}
+		chaincodeLogger.Debugf("Container not in READY state(%s)...send init/ready", chrte.handler.FSM.Current())
+	}
+	chaincodeSupport.runningChaincodes.Unlock()
+
+	var depTx *pb.Transaction
+
+	//extract depTx so we can initialize handler.deployTXSecContext
+	//we need it only after container is launched and only if this is not a deploy tx
+	//NOTE: ideally this section should be moved before just before sendInitOrReady where
+	//      where we need depTx. However, as we don't check for ExecuteTransactions failure
+	//      in consensus/helper, the following race is not resolved:
+	//         1) deploy creates image
+	//         2) query launches chaincode
+	//         3) deploy returns "premature execution" error
+	//         4) error ignored and deploy committed
+	//         5) query successfully retrieves committed tx and calls sendInitOrReady
+	// See issue #710
+
+	if t.Type != pb.Transaction_CHAINCODE_DEPLOY {
+		ledger, ledgerErr := ledger.GetLedger()
+
+		if chaincodeSupport.userRunsCC {
+			chaincodeLogger.Error("You are attempting to perform an action other than Deploy on Chaincode that is not ready and you are in developer mode. Did you forget to Deploy your chaincode?")
+		}
+
+		if ledgerErr != nil {
+			return cID, cMsg, fmt.Errorf("Failed to get handle to ledger (%s)", ledgerErr)
+		}
+
+		//hopefully we are restarting from existing image and the deployed transaction exists
+		depTx, ledgerErr = ledger.GetTransactionByUUID(chaincode)
+		if ledgerErr != nil {
+			return cID, cMsg, fmt.Errorf("Could not get deployment transaction for %s - %s", chaincode, ledgerErr)
+		}
+		if depTx == nil {
+			return cID, cMsg, fmt.Errorf("deployment transaction does not exist for %s", chaincode)
+		}
+		if nil != chaincodeSupport.secHelper {
+			var err error
+			depTx, err = chaincodeSupport.secHelper.TransactionPreExecution(depTx)
+			// Note that t is now decrypted and is a deep clone of the original input t
+			if nil != err {
+				return cID, cMsg, fmt.Errorf("failed tx preexecution%s - %s", chaincode, err)
+			}
+		}
+		//Get lang from original deployment
+		err := proto.Unmarshal(depTx.Payload, cds)
+		if err != nil {
+			return cID, cMsg, fmt.Errorf("failed to unmarshal deployment transactions for %s - %s", chaincode, err)
+		}
+		cLang = cds.ChaincodeSpec.Type
+	}
+
+	//from here on : if we launch the container and get an error, we need to stop the container
+
+	//launch container if it is a System container or not in dev mode
+	if (!chaincodeSupport.userRunsCC || cds.ExecEnv == pb.ChaincodeDeploymentSpec_SYSTEM) && (chrte == nil || chrte.handler == nil) {
+		var targz io.Reader = bytes.NewBuffer(cds.CodePackage)
+		_, err = chaincodeSupport.launchAndWaitForRegister(context, cds, cID, t.Uuid, cLang, targz)
+		if err != nil {
+			chaincodeLogger.Errorf("launchAndWaitForRegister failed %s", err)
+			return cID, cMsg, err
+		}
+	}
+
+	if err == nil {
+		//send init (if (f,args)) and wait for ready state
+		err = chaincodeSupport.sendInitOrReady(context, t.Uuid, chaincode, f, initargs, chaincodeSupport.ccStartupTimeout, t, depTx)
+		if err != nil {
+			chaincodeLogger.Errorf("sending init failed(%s)", err)
+			err = fmt.Errorf("Failed to init chaincode(%s)", err)
+			errIgnore := chaincodeSupport.Stop(context, cds)
+			if errIgnore != nil {
+				chaincodeLogger.Errorf("stop failed %s(%s)", errIgnore, err)
+			}
+		}
+		chaincodeLogger.Debug("sending init completed")
+	}
+
+	chaincodeLogger.Debug("LaunchChaincode complete")
+
+	return cID, cMsg, err
+}
+
+// getSecHelper returns the security helper set from NewChaincodeSupport
+func (chaincodeSupport *ChaincodeSupport) getSecHelper() crypto.Peer {
+	return chaincodeSupport.secHelper // may be nil when security is disabled
+}
+
+//getVMType - just returns a string for now. Another possibility is to use a factory method to
+//return a VM executor
+func (chaincodeSupport *ChaincodeSupport) getVMType(cds *pb.ChaincodeDeploymentSpec) (string, error) {
+	if cds.ExecEnv != pb.ChaincodeDeploymentSpec_SYSTEM {
+		return container.DOCKER, nil
+	}
+	return container.SYSTEM, nil
+}
+
+// Deploy deploys the chaincode if not in development mode where user is running the chaincode.
+func (chaincodeSupport *ChaincodeSupport) Deploy(context context.Context, t *pb.Transaction) (*pb.ChaincodeDeploymentSpec, error) {
+	//build the chaincode
+	cds := &pb.ChaincodeDeploymentSpec{}
+	err := proto.Unmarshal(t.Payload, cds)
+	if err != nil {
+		return nil, err
+	}
+	cID := cds.ChaincodeSpec.ChaincodeID
+	cLang := cds.ChaincodeSpec.Type
+	chaincode := cID.Name
+	//NOTE: a redundant "if err != nil" check used to sit here; err is always
+	//nil at this point (checked right after Unmarshal above), so the
+	//unreachable branch was removed
+
+	if chaincodeSupport.userRunsCC {
+		chaincodeLogger.Debug("user runs chaincode, not deploying chaincode")
+		return nil, nil
+	}
+
+	chaincodeSupport.runningChaincodes.Lock()
+	//if its in the map, there must be a connected stream...and we are trying to build the code ?!
+	if _, ok := chaincodeSupport.chaincodeHasBeenLaunched(chaincode); ok {
+		chaincodeLogger.Debugf("deploy ?!! there's a chaincode with that name running: %s", chaincode)
+		chaincodeSupport.runningChaincodes.Unlock()
+		return cds, fmt.Errorf("deploy attempted but a chaincode with same name running %s", chaincode)
+	}
+	chaincodeSupport.runningChaincodes.Unlock()
+
+	args, envs, err := chaincodeSupport.getArgsAndEnv(cID, cLang)
+	if err != nil {
+		return cds, fmt.Errorf("error getting args for chaincode %s", err)
+	}
+
+	var targz io.Reader = bytes.NewBuffer(cds.CodePackage)
+	cir := &container.CreateImageReq{CCID: ccintf.CCID{ChaincodeSpec: cds.ChaincodeSpec, NetworkID: chaincodeSupport.peerNetworkID, PeerID: chaincodeSupport.peerID}, Args: args, Reader: targz, Env: envs}
+
+	vmtype, _ := chaincodeSupport.getVMType(cds)
+
+	chaincodeLogger.Debugf("deploying chaincode %s(networkid:%s,peerid:%s)", chaincode, chaincodeSupport.peerNetworkID, chaincodeSupport.peerID)
+
+	//create image and create container
+	_, err = container.VMCProcess(context, vmtype, cir)
+	if err != nil {
+		err = fmt.Errorf("Error starting container: %s", err)
+	}
+
+	return cds, err
+}
+
+// HandleChaincodeStream implements ccintf.HandleChaincodeStream for all vms to call with appropriate stream
+func (chaincodeSupport *ChaincodeSupport) HandleChaincodeStream(ctxt context.Context, stream ccintf.ChaincodeStream) error {
+	return HandleChaincodeStream(chaincodeSupport, ctxt, stream) // delegate to the package-level stream loop
+}
+
+// Register the bidi stream entry point called by chaincode to register with the Peer.
+func (chaincodeSupport *ChaincodeSupport) Register(stream pb.ChaincodeSupport_RegisterServer) error {
+	return chaincodeSupport.HandleChaincodeStream(stream.Context(), stream) // serve the stream until it closes
+}
+
+// createTransactionMessage creates a transaction message.
+func createTransactionMessage(uuid string, cMsg *pb.ChaincodeInput) (*pb.ChaincodeMessage, error) {
+	payload, err := proto.Marshal(cMsg)
+	if err != nil {
+		fmt.Printf("%s", err.Error()) //fix: never use err.Error() as a format string ('%' in the error would corrupt output)
+		return nil, err
+	}
+	return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_TRANSACTION, Payload: payload, Uuid: uuid}, nil
+}
+
+// createQueryMessage creates a query message.
+func createQueryMessage(uuid string, cMsg *pb.ChaincodeInput) (*pb.ChaincodeMessage, error) {
+	payload, marshalErr := proto.Marshal(cMsg)
+	if marshalErr != nil {
+		return nil, marshalErr
+	}
+	return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_QUERY, Uuid: uuid, Payload: payload}, nil
+}
+
+// Execute executes a transaction and waits for it to complete until a timeout value.
+func (chaincodeSupport *ChaincodeSupport) Execute(ctxt context.Context, chaincode string, msg *pb.ChaincodeMessage, timeout time.Duration, tx *pb.Transaction) (*pb.ChaincodeMessage, error) {
+	chaincodeSupport.runningChaincodes.Lock()
+	//we expect the chaincode to be running... sanity check
+	chrte, ok := chaincodeSupport.chaincodeHasBeenLaunched(chaincode)
+	if !ok {
+		chaincodeSupport.runningChaincodes.Unlock()
+		chaincodeLogger.Debugf("cannot execute-chaincode is not running: %s", chaincode)
+		return nil, fmt.Errorf("Cannot execute transaction or query for %s", chaincode)
+	}
+	chaincodeSupport.runningChaincodes.Unlock() // release before the potentially long wait below
+
+	var notfy chan *pb.ChaincodeMessage
+	var err error
+	if notfy, err = chrte.handler.sendExecuteMessage(msg, tx); err != nil {
+		return nil, fmt.Errorf("Error sending %s: %s", msg.Type.String(), err)
+	}
+	var ccresp *pb.ChaincodeMessage
+	select {
+	case ccresp = <-notfy:
+		//response is sent to user or calling chaincode. ChaincodeMessage_ERROR and ChaincodeMessage_QUERY_ERROR
+		//are typically treated as error
+	case <-time.After(timeout):
+		err = fmt.Errorf("Timeout expired while executing transaction")
+	}
+
+	//our responsibility to delete transaction context if sendExecuteMessage succeeded
+	chrte.handler.deleteTxContext(msg.Uuid)
+
+	return ccresp, err
+}
diff --git a/core/chaincode/chaincodetest.yaml b/core/chaincode/chaincodetest.yaml
new file mode 100644
index 00000000000..ab6a75cc6e1
--- /dev/null
+++ b/core/chaincode/chaincodetest.yaml
@@ -0,0 +1,545 @@
+# CA server parameters
+#
+server:
+ # current version of the CA
+ version: "0.1"
+
+ # limits the number of operating system threads used by the CA
+ gomaxprocs: 2
+
+ # path to the OBC state directory and CA state subdirectory
+ # rootpath: "."
+ # cadir: ".ca"
+
+ # port the CA services are listening on
+ port: ":20051"
+
+ # TLS certificate and key file paths
+ tls:
+
+security:
+ # Can be 256 or 384
+ # Must be the same as in core.yaml
+ level: 256
+
+# Enabling/disabling different logging levels of the CA.
+#
+logging:
+ trace: 0
+ info: 1
+ warning: 1
+ error: 1
+ panic: 1
+
+# Enable attribute encryption in TCerts generated by TCA
+tca:
+ attribute-encryption:
+ enabled: true
+
+# Default attributes for Attribute Certificate Authority
+aca:
+ attributes:
+ attribute-entry-0: user1;bank_a;company;ACompany;2015-01-01T00:00:00-03:00;;
+ attribute-entry-1: user1;bank_a;position;Software Staff;2015-01-01T00:00:00-03:00;2015-07-12T23:59:59-03:00;
+ attribute-entry-2: user1;bank_a;position;Software Engineer;2015-07-13T00:00:00-03:00;;
+ attribute-entry-3: user2;bank_a;company;ACompany;2001-02-02T00:00:00-03:00;;
+ attribute-entry-4: user2;bank_a;position;Project Manager;2001-02-02T00:00:00-03:00;;
+ address: localhost:20051
+ server-name: acap
+ enabled: true
+
+# Default users to be registered with the CA on first launch. The role is a binary OR
+# of the different roles a user can have:
+#
+# - simple client such as a wallet: CLIENT
+# - non-validating peer: PEER
+# - validating client: VALIDATOR
+# - auditing client: AUDITOR
+#
+eca:
+ affiliations:
+ banks_and_institutions:
+ banks:
+ - bank_a
+ - bank_b
+ - bank_c
+ institutions:
+ - institution_a
+ users:
+    # <enrollmentID>: <role> <enrollmentPWD> <affiliation>
+ lukas: 1 NPKYL39uKbkj institution_a
+ diego: 1 DRJ23pEQl16a institution_a
+ jim: 1 6avZQLwcUe9b institution_a
+
+ vp: 4 f3489fy98ghf
+
+###############################################################################
+#
+# CLI section
+#
+###############################################################################
+cli:
+
+ # The address that the cli process will use for callbacks from chaincodes
+ address: 0.0.0.0:30304
+
+
+
+###############################################################################
+#
+# REST section
+#
+###############################################################################
+rest:
+
+ # Enable/disable setting for the REST service. It is recommended to disable
+ # REST service on validators in production deployment and use non-validating
+ # nodes to host REST service
+ enabled: true
+
+ # The address that the REST service will listen on for incoming requests.
+ address: 0.0.0.0:5000
+
+
+###############################################################################
+#
+# LOGGING section
+#
+###############################################################################
+logging:
+
+ # Valid logging levels are case-insensitive strings chosen from
+
+ # CRITICAL | ERROR | WARNING | NOTICE | INFO | DEBUG
+
+ # Logging 'module' names are also strings, however valid module names are
+ # defined at runtime and are not checked for validity during option
+ # processing.
+
+ # Default logging levels are specified here for each of the peer
+ # commands. For commands that have subcommands, the defaults also apply to
+ # all subcommands of the command. These logging levels can be overridden
+ # on the command line using the --logging-level command-line option, or by
+ # setting the CORE_LOGGING_LEVEL environment variable.
+
+ # The logging level specification is of the form
+
+  # [<module>[,<module>...]=]<level>[:[<module>[,<module>...]=]<level>...]
+
+ # A logging level by itself is taken as the overall default. Otherwise,
+ # overrides for individual or groups of modules can be specified using the
+  # [<module>[,<module>...]=]<level> syntax.
+
+ # Examples:
+ # info - Set default to INFO
+ # warning:main,db=debug:chaincode=info - Override default WARNING in main,db,chaincode
+ # chaincode=info:main=debug:db=debug:warning - Same as above
+ peer: debug
+ crypto: info
+ status: warning
+ stop: warning
+ login: warning
+ vm: warning
+ chaincode: warning
+
+
+###############################################################################
+#
+# Peer section
+#
+###############################################################################
+peer:
+
+ # Peer Version following version semantics as described here http://semver.org/
+ # The Peer supplies this version in communications with other Peers
+ version: 0.1.0
+
+ # The Peer id is used for identifying this Peer instance.
+ id: jdoe
+
+ # The privateKey to be used by this peer
+ # privateKey: 794ef087680e2494fa4918fd8fb80fb284b50b57d321a31423fe42b9ccf6216047cea0b66fe8365a8e3f2a8140c6866cc45852e63124668bee1daa9c97da0c2a
+
+  # The networkId allows for logical separation of networks
+ # networkId: dev
+ # networkId: test
+ networkId: dev
+
+ Dockerfile: |
+ from hyperledger/fabric-baseimage:latest
+ # Copy GOPATH src and install Peer
+ COPY src $GOPATH/src
+ RUN mkdir -p /var/hyperledger/db
+ WORKDIR $GOPATH/src/github.com/hyperledger/fabric/peer/
+ RUN CGO_CFLAGS=" " CGO_LDFLAGS="-lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy" go install && cp $GOPATH/src/github.com/hyperledger/fabric/peer/core.yaml $GOPATH/bin
+
+
+ # The Address this Peer will listen on
+ listenAddress: 0.0.0.0:21212
+ # The Address this Peer will bind to for providing services
+ address: 0.0.0.0:21212
+ # Whether the Peer should programmatically determine the address to bind to.
+ # This case is useful for docker containers.
+ addressAutoDetect: false
+
+ # Peer port to accept connections on
+ port: 21212
+ # Setting for runtime.GOMAXPROCS(n). If n < 1, it does not change the current setting
+ gomaxprocs: -1
+ workers: 2
+
+ # Sync related configuration
+ sync:
+ blocks:
+ # Channel size for readonly SyncBlocks messages channel for receiving
+      # blocks from opposite Peer Endpoints.
+ # NOTE: currently messages are not stored and forwarded, but rather
+ # lost if the channel write blocks.
+ channelSize: 10
+ state:
+ snapshot:
+ # Channel size for readonly syncStateSnapshot messages channel
+        # for receiving state deltas for snapshot from opposite Peer Endpoints.
+ # NOTE: currently messages are not stored and forwarded, but
+ # rather lost if the channel write blocks.
+ channelSize: 50
+ deltas:
+ # Channel size for readonly syncStateDeltas messages channel for
+        # receiving state deltas for a syncBlockRange from opposite
+ # Peer Endpoints.
+ # NOTE: currently messages are not stored and forwarded,
+ # but rather lost if the channel write blocks.
+ channelSize: 20
+
+ # Validator defines whether this peer is a validating peer or not, and if
+ # it is enabled, what consensus plugin to load
+ validator:
+ enabled: true
+
+ consensus:
+ # Consensus plugin to use. The value is the name of the plugin, e.g. pbft, noops ( this value is case-insensitive)
+ # if the given value is not recognized, we will default to noops
+ plugin: noops
+
+ # total number of consensus messages which will be buffered per connection before delivery is rejected
+ buffersize: 1000
+
+ events:
+ # The address that the Event service will be enabled on the validator
+ address: 0.0.0.0:31315
+
+ # total number of events that could be buffered without blocking the
+ # validator sends
+ buffersize: 100
+
+ # milliseconds timeout for producer to send an event.
+ # if < 0, if buffer full, unblocks immediately and not send
+ # if 0, if buffer full, will block and guarantee the event will be sent out
+ # if > 0, if buffer full, blocks till timeout
+ timeout: 10
+
+ # TLS Settings for p2p communications
+ tls:
+ enabled: true
+ cert:
+ file: testdata/server1.pem
+ key:
+ file: testdata/server1.key
+
+    # The server name used to verify the hostname returned by TLS handshake
+ # The key cert was generated using
+ # openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout testdata/server1.key -out testdata/server1.pem
+ serverhostoverride: dummy
+
+ # PKI member services properties
+ pki:
+ eca:
+ paddr: localhost:20051
+ tca:
+ paddr: localhost:20051
+ tlsca:
+ paddr: localhost:20051
+ tls:
+ enabled: false
+ rootcert:
+ file: tlsca.cert
+      # The server name used to verify the hostname returned by TLS handshake
+ serverhostoverride:
+
+ # Peer discovery settings. Controls how this peer discovers other peers
+ discovery:
+
+ # The root nodes are used for bootstrapping purposes, and generally
+ # supplied through ENV variables
+ rootnode:
+
+ # The duration of time between attempts to asks peers for their connected peers
+ period: 5s
+
+ ## leaving this in for example of sub map entry
+ # testNodes:
+ # - node : 1
+ # ip : 127.0.0.1
+ # port : 21212
+ # - node : 2
+ # ip : 127.0.0.1
+ # port : 21212
+
+ # Should the discovered nodes and their reputations
+ # be stored in DB and persisted between restarts
+ persist: true
+
+ # if peer discovery is off
+ # the peer window will show
+ # only what retrieved by active
+ # peer [true/false]
+ enabled: true
+
+ # number of workers that
+ # test the peers for being
+ # online [1..10]
+ workers: 8
+
+ # the period in seconds with which the discovery
+ # tries to reconnect to successful nodes
+ # 0 means the nodes are not reconnected
+ touchPeriod: 600
+
+    # the maximum number of nodes to reconnect to
+ # -1 for unlimited
+ touchMaxNodes: 100
+
+ # Path on the file system where peer will store data
+ fileSystemPath: /var/hyperledger/production
+
+
+ profile:
+ enabled: false
+ listenAddress: 0.0.0.0:6060
+
+###############################################################################
+#
+# VM section
+#
+###############################################################################
+vm:
+
+ # Endpoint of the vm management system. For docker can be one of the following in general
+ # unix:///var/run/docker.sock
+ # http://localhost:2375
+ # https://localhost:2376
+ endpoint: unix:///var/run/docker.sock
+
+ # settings for docker vms
+ docker:
+ tls:
+ enabled: false
+ cert:
+ file: /path/to/server.pem
+ ca:
+ file: /path/to/ca.pem
+ key:
+ file: /path/to/server-key.pem
+
+###############################################################################
+#
+# Chaincode section
+#
+###############################################################################
+chaincode:
+
+ # The id is used by the Chaincode stub to register the executing Chaincode
+  # ID with the Peer and is generally supplied through ENV variables
+ # the Path form of ID is provided when deploying the chaincode. The name is
+ # used for all other requests. The name is really a hashcode
+ # returned by the system in response to the deploy transaction. In
+ # development mode where user runs the chaincode, the name can be any string
+ id:
+ path:
+ name:
+
+ golang:
+
+ # This is the basis for the Golang Dockerfile. Additional commands will
+    # be appended dependent upon the chaincode specification.
+ Dockerfile: |
+ from hyperledger/fabric-baseimage
+ #from utxo:0.1.0
+ COPY src $GOPATH/src
+ WORKDIR $GOPATH
+
+ car:
+
+ # This is the basis for the CAR Dockerfile. Additional commands will
+    # be appended dependent upon the chaincode specification.
+ Dockerfile: |
+ FROM hyperledger/fabric-baseimage
+
+ # timeout in millisecs for starting up a container and waiting for Register
+ # to come through. 1sec should be plenty for chaincode unit tests
+ startuptimeout: 1000
+
+ #timeout in millisecs for deploying chaincode from a remote repository.
+ deploytimeout: 60000
+
+ #mode - options are "dev", "net"
+ #dev - in dev mode, user runs the chaincode after starting validator from
+ # command line on local machine
+ #net - in net mode validator will run chaincode in a docker container
+
+ mode: net
+ # typically installpath should not be modified. Otherwise, user must ensure
+  # the chaincode executable is placed in the path specified by installpath in
+ # the image
+ installpath: /opt/gopath/bin/
+
+  #keepalive in seconds. In situations where the communication goes through a
+ #proxy that does not support keep-alive, this parameter will maintain connection
+ #between peer and chaincode.
+ #A value <= 0 turns keepalive off
+ keepalive: 1
+###############################################################################
+#
+# Ledger section - ledger configuration encompasses both the blockchain
+# and the state
+#
+###############################################################################
+ledger:
+
+ blockchain:
+
+ # Define the genesis block
+ genesisBlock:
+
+ # Deploy chaincodes into the genesis block
+ chaincodes:
+
+ #sample_syscc:
+ # path: github.com/hyperledger/fabric/core/system_chaincode/sample_syscc
+ # type: GOLANG
+ # constructor:
+ # args:
+ # - greetings
+ # - hello world
+
+ # Setting the deploy-system-chaincode property to false will prevent the
+ # deploying of system chaincode at genesis time.
+ deploy-system-chaincode: false
+
+ state:
+
+    # Control the number of state deltas that are maintained. This takes additional
+ # disk space, but allow the state to be rolled backwards and forwards
+ # without the need to replay transactions.
+ deltaHistorySize: 500
+
+ # The data structure in which the state will be stored. Different data
+ # structures may offer different performance characteristics.
+ # Options are 'buckettree', 'trie' and 'raw'.
+ # ( Note:'raw' is experimental and incomplete. )
+ # If not set, the default data structure is the 'buckettree'.
+ # This CANNOT be changed after the DB has been created.
+ dataStructure:
+ # The name of the data structure is for storing the state
+ name: buckettree
+ # The data structure specific configurations
+ configs:
+ # configurations for 'bucketree'. These CANNOT be changed after the DB
+ # has been created. 'numBuckets' defines the number of bins that the
+ # state key-values are to be divided
+ numBuckets: 1000003
+ # 'maxGroupingAtEachLevel' defines the number of bins that are grouped
+ #together to construct next level of the merkle-tree (this is applied
+ # repeatedly for constructing the entire tree).
+ maxGroupingAtEachLevel: 5
+ # 'bucketCacheSize' defines the size (in MBs) of the cache that is used to keep
+        # the buckets (from root up to second-last level) in memory. This cache helps
+ # in making state hash computation faster. A value less than or equals to zero
+ # leads to disabling this caching. This caching helps more if transactions
+ # perform significant writes.
+ bucketCacheSize: 100
+
+ # configurations for 'trie'
+    # 'trie' has no additional configurations exposed as yet
+
+
+###############################################################################
+#
+# Security section - Applied to all entities (client, NVP, VP)
+#
+###############################################################################
+security:
+ # Enable security will force every entity on the network to enroll with obc-ca
+ # and maintain a valid set of certificates in order to communicate with
+ # other peers
+ enabled: false
+ # To enroll NVP or VP with membersrvc. These parameters are for 1 time use.
+ # They will not be valid on subsequent times without un-enroll first.
+ # The values come from off-line registration with obc-ca. For testing, make
+ # sure the values are in membersrvc/membersrvc.yaml file eca.users
+ enrollID: vp
+ enrollSecret: f3489fy98ghf
+ # To enable privacy of transactions (requires security to be enabled). This
+ # encrypts the transaction content during transit and at rest. The state
+ # data is also encrypted
+ privacy: false
+
+ # Can be 256 or 384. If you change here, you have to change also
+ # the same property in membersrvc.yaml to the same value
+ level: 256
+
+ # Can be SHA2 or SHA3. If you change here, you have to change also
+ # the same property in membersrvc.yaml to the same value
+ hashAlgorithm: SHA3
+
+ # TCerts related configuration
+ tcert:
+ batch:
+ # The size of the batch of TCerts
+ size: 200
+ attributes:
+ company: IBM
+ position: "Software Engineer"
+
+
+################################################################################
+#
+# SECTION: STATETRANSFER
+#
+# - This applies to recovery behavior when the replica has detected
+# a state transfer is required
+#
+# - This might happen:
+# - During a view change in response to a faulty primary
+# - After a network outage which has isolated the replica
+# - If the current blockchain/state is determined to be corrupt
+#
+################################################################################
+statetransfer:
+
+ # Should a replica attempt to fix damaged blocks?
+ # In general, this should be set to true, setting to false will cause
+  # the replica to panic, and require a human to intervene
+ # and fix the corruption
+ recoverdamage: true
+
+ # The number of blocks to retrieve per sync request
+ blocksperrequest: 20
+
+ # The maximum number of state deltas to attempt to retrieve
+ # If more than this number of deltas is required to play the state up to date
+ # then instead the state will be flagged as invalid, and a full copy of the state
+ # will be retrieved instead
+ maxdeltas: 200
+
+ # Timeouts
+ timeout:
+
+ # How long may returning a single block take
+ singleblock: 2s
+
+ # How long may returning a single state delta take
+ singlestatedelta: 2s
+
+ # How long may transferring the complete state take
+ fullstate: 60s
diff --git a/core/chaincode/config.go b/core/chaincode/config.go
new file mode 100644
index 00000000000..91266ad8f72
--- /dev/null
+++ b/core/chaincode/config.go
@@ -0,0 +1,74 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chaincode
+
+import (
+ "flag"
+ "fmt"
+ "runtime"
+ "strings"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+// Config the config wrapper structure
+// NOTE(review): currently an empty placeholder; all configuration in this
+// package is read directly through viper rather than through this struct.
+type Config struct {
+}
+
+func init() {
+	// Intentionally empty; kept as an explicit package-initialization hook.
+}
+
+// SetupTestLogging setup the logging during test execution.
+// The level is taken from the "logging.peer" configuration key and applied to
+// the "main", "server" and "peer" logging modules; if the configured value is
+// not a recognized level, a warning is emitted and ERROR is used instead.
+func SetupTestLogging() {
+	modules := []string{"main", "server", "peer"}
+	level, err := logging.LogLevel(viper.GetString("logging.peer"))
+	if err != nil {
+		// Unrecognized setting: warn once and fall back to ERROR.
+		chaincodeLogger.Warningf("Log level not recognized '%s', defaulting to %s: %s", viper.GetString("logging.peer"), logging.ERROR, err)
+		level = logging.ERROR
+	}
+	for _, module := range modules {
+		logging.SetLevel(level, module)
+	}
+}
+
+// SetupTestConfig setup the config during test execution.
+// It parses flags, wires viper to the CORE_* environment variables and the
+// "chaincodetest" config file in the current directory, configures test
+// logging, and applies the configured GOMAXPROCS value.
+// Panics if the config file cannot be read.
+func SetupTestConfig() {
+	flag.Parse()
+
+	// Now set the configuration file
+	viper.SetEnvPrefix("CORE")
+	viper.AutomaticEnv()
+	replacer := strings.NewReplacer(".", "_")
+	viper.SetEnvKeyReplacer(replacer)
+	viper.SetConfigName("chaincodetest") // name of config file (without extension)
+	viper.AddConfigPath("./")            // path to look for the config file in
+	err := viper.ReadInConfig()          // Find and read the config file
+	if err != nil {                      // Handle errors reading the config file
+		panic(fmt.Errorf("Fatal error config file: %s \n", err))
+	}
+
+	SetupTestLogging()
+
+	// Set the number of maxprocs from configuration.
+	// BUG FIX: this previously called runtime.GOMAXPROCS(2), which forced the
+	// process to 2 procs instead of applying peer.gomaxprocs. Per the runtime
+	// docs, a value < 1 leaves the current setting unchanged, matching the
+	// semantics documented in core.yaml.
+	var numProcsDesired = viper.GetInt("peer.gomaxprocs")
+	chaincodeLogger.Debugf("setting Number of procs to %d, was %d\n", numProcsDesired, runtime.GOMAXPROCS(numProcsDesired))
+}
diff --git a/core/chaincode/exectransaction.go b/core/chaincode/exectransaction.go
new file mode 100644
index 00000000000..56bf26359c2
--- /dev/null
+++ b/core/chaincode/exectransaction.go
@@ -0,0 +1,219 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chaincode
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "github.com/hyperledger/fabric/core/ledger"
+ "github.com/hyperledger/fabric/events/producer"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+//Execute - execute transaction or a query
+// For CHAINCODE_DEPLOY it deploys and launches the chaincode; for
+// CHAINCODE_INVOKE / CHAINCODE_QUERY it launches the chaincode if needed and
+// forwards the call. Returns the response payload, an optional chaincode
+// event, and an error. Tx begin/finish is recorded on the ledger for
+// non-query transactions (see markTxBegin/markTxFinish).
+func Execute(ctxt context.Context, chain *ChaincodeSupport, t *pb.Transaction) ([]byte, *pb.ChaincodeEvent, error) {
+	var err error
+
+	// get a handle to ledger to mark the begin/finish of a tx
+	ledger, ledgerErr := ledger.GetLedger()
+	if ledgerErr != nil {
+		return nil, nil, fmt.Errorf("Failed to get handle to ledger (%s)", ledgerErr)
+	}
+
+	// With security enabled, decrypt/verify the transaction first.
+	if secHelper := chain.getSecHelper(); nil != secHelper {
+		var err error
+		t, err = secHelper.TransactionPreExecution(t)
+		// Note that t is now decrypted and is a deep clone of the original input t
+		if nil != err {
+			return nil, nil, err
+		}
+	}
+
+	if t.Type == pb.Transaction_CHAINCODE_DEPLOY {
+		_, err := chain.Deploy(ctxt, t)
+		if err != nil {
+			return nil, nil, fmt.Errorf("Failed to deploy chaincode spec(%s)", err)
+		}
+
+		//launch and wait for ready
+		markTxBegin(ledger, t)
+		_, _, err = chain.Launch(ctxt, t)
+		if err != nil {
+			markTxFinish(ledger, t, false)
+			return nil, nil, fmt.Errorf("%s", err)
+		}
+		markTxFinish(ledger, t, true)
+	} else if t.Type == pb.Transaction_CHAINCODE_INVOKE || t.Type == pb.Transaction_CHAINCODE_QUERY {
+		//will launch if necessary (and wait for ready)
+		cID, cMsg, err := chain.Launch(ctxt, t)
+		if err != nil {
+			return nil, nil, fmt.Errorf("Failed to launch chaincode spec(%s)", err)
+		}
+
+		//this should work because it worked above...
+		chaincode := cID.Name
+
+		// NOTE(review): err is necessarily nil here (checked right after
+		// Launch above), so this branch is currently unreachable dead code.
+		if err != nil {
+			return nil, nil, fmt.Errorf("Failed to stablish stream to container %s", chaincode)
+		}
+
+		// TODO: Need to comment next line and uncomment call to getTimeout, when transaction blocks are being created
+		timeout := time.Duration(30000) * time.Millisecond
+		//timeout, err := getTimeout(cID)
+
+		// NOTE(review): also unreachable while the getTimeout call above
+		// remains commented out — err has not been reassigned.
+		if err != nil {
+			return nil, nil, fmt.Errorf("Failed to retrieve chaincode spec(%s)", err)
+		}
+
+		// Build the invoke- or query-flavored chaincode message.
+		var ccMsg *pb.ChaincodeMessage
+		if t.Type == pb.Transaction_CHAINCODE_INVOKE {
+			ccMsg, err = createTransactionMessage(t.Uuid, cMsg)
+			if err != nil {
+				return nil, nil, fmt.Errorf("Failed to transaction message(%s)", err)
+			}
+		} else {
+			ccMsg, err = createQueryMessage(t.Uuid, cMsg)
+			if err != nil {
+				return nil, nil, fmt.Errorf("Failed to query message(%s)", err)
+			}
+		}
+
+		markTxBegin(ledger, t)
+		resp, err := chain.Execute(ctxt, chaincode, ccMsg, timeout, t)
+		if err != nil {
+			// Rollback transaction
+			markTxFinish(ledger, t, false)
+			return nil, nil, fmt.Errorf("Failed to execute transaction or query(%s)", err)
+		} else if resp == nil {
+			// Rollback transaction
+			markTxFinish(ledger, t, false)
+			return nil, nil, fmt.Errorf("Failed to receive a response for (%s)", t.Uuid)
+		} else {
+			// Stamp any emitted event with its origin before returning it.
+			if resp.ChaincodeEvent != nil {
+				resp.ChaincodeEvent.ChaincodeID = chaincode
+				resp.ChaincodeEvent.TxID = t.Uuid
+			}
+
+			if resp.Type == pb.ChaincodeMessage_COMPLETED || resp.Type == pb.ChaincodeMessage_QUERY_COMPLETED {
+				// Success
+				markTxFinish(ledger, t, true)
+				return resp.Payload, resp.ChaincodeEvent, nil
+			} else if resp.Type == pb.ChaincodeMessage_ERROR || resp.Type == pb.ChaincodeMessage_QUERY_ERROR {
+				// Rollback transaction
+				markTxFinish(ledger, t, false)
+				return nil, resp.ChaincodeEvent, fmt.Errorf("Transaction or query returned with failure: %s", string(resp.Payload))
+			}
+			// Unexpected message type: mark the tx failed but surface the payload.
+			markTxFinish(ledger, t, false)
+			return resp.Payload, nil, fmt.Errorf("receive a response for (%s) but in invalid state(%d)", t.Uuid, resp.Type)
+		}
+
+	} else {
+		err = fmt.Errorf("Invalid transaction type %s", t.Type.String())
+	}
+	return nil, nil, err
+}
+
+//ExecuteTransactions - will execute transactions on the array one by one
+//will return an array of errors one for each transaction. If the execution
+//succeeded, array element will be nil. returns []byte of state hash or
+//error
+func ExecuteTransactions(ctxt context.Context, cname ChainName, xacts []*pb.Transaction) (succeededTXs []*pb.Transaction, stateHash []byte, ccevents []*pb.ChaincodeEvent, txerrs []error, err error) {
+	var chain = GetChain(cname)
+	if chain == nil {
+		// TODO: We should never get here, but otherwise a good reminder to better handle
+		panic(fmt.Sprintf("[ExecuteTransactions]Chain %s not found\n", cname))
+	}
+
+	// One error slot and one event slot per transaction, in input order.
+	txerrs = make([]error, len(xacts))
+	ccevents = make([]*pb.ChaincodeEvent, len(xacts))
+	// FIX: populate the named return directly instead of shadowing it with a
+	// differently-cased local ("succeededTxs") as the original did.
+	succeededTXs = make([]*pb.Transaction, 0)
+	for i, t := range xacts {
+		_, ccevents[i], txerrs[i] = Execute(ctxt, chain, t)
+		if txerrs[i] == nil {
+			succeededTXs = append(succeededTXs, t)
+		} else {
+			// Notify event consumers that this transaction was rejected.
+			sendTxRejectedEvent(xacts[i], txerrs[i].Error())
+		}
+	}
+
+	// Hash of the in-progress (temp) state after executing the whole batch.
+	var lgr *ledger.Ledger
+	lgr, err = ledger.GetLedger()
+	if err == nil {
+		stateHash, err = lgr.GetTempStateHash()
+	}
+
+	return succeededTXs, stateHash, ccevents, txerrs, err
+}
+
+// GetSecureContext returns the security context from the context object or error
+// Security context is nil if security is off from core.yaml file
+// func GetSecureContext(ctxt context.Context) (crypto.Peer, error) {
+// var err error
+// temp := ctxt.Value("security")
+// if nil != temp {
+// if secCxt, ok := temp.(crypto.Peer); ok {
+// return secCxt, nil
+// }
+// err = errors.New("Failed to convert security context type")
+// }
+// return nil, err
+// }
+
+var errFailedToGetChainCodeSpecForTransaction = errors.New("Failed to get ChainCodeSpec from Transaction")
+
+// getTimeout looks up the deployment transaction recorded for the given
+// chaincode and returns the per-chaincode timeout (milliseconds in the spec,
+// returned as a time.Duration).
+// Returns (-1, errFailedToGetChainCodeSpecForTransaction) if any lookup or
+// decode step fails.
+func getTimeout(cID *pb.ChaincodeID) (time.Duration, error) {
+	lgr, err := ledger.GetLedger()
+	if err != nil {
+		return -1, errFailedToGetChainCodeSpecForTransaction
+	}
+	chaincodeID := cID.Name
+	txUUID, err := lgr.GetState(chaincodeID, "github.com_openblockchain_obc-peer_chaincode_id", true)
+	if err != nil {
+		return -1, errFailedToGetChainCodeSpecForTransaction
+	}
+	tx, err := lgr.GetTransactionByUUID(string(txUUID))
+	if err != nil {
+		return -1, errFailedToGetChainCodeSpecForTransaction
+	}
+	chaincodeDeploymentSpec := &pb.ChaincodeDeploymentSpec{}
+	// FIX: the unmarshal error was previously ignored, which could yield a
+	// bogus timeout from a partially populated spec.
+	if err := proto.Unmarshal(tx.Payload, chaincodeDeploymentSpec); err != nil {
+		return -1, errFailedToGetChainCodeSpecForTransaction
+	}
+	chaincodeSpec := chaincodeDeploymentSpec.GetChaincodeSpec()
+	return time.Duration(chaincodeSpec.Timeout) * time.Millisecond, nil
+}
+
+// markTxBegin records on the ledger that execution of transaction t is
+// starting. Queries are read-only and are not tracked, so they are skipped.
+func markTxBegin(ledger *ledger.Ledger, t *pb.Transaction) {
+	if t.Type != pb.Transaction_CHAINCODE_QUERY {
+		ledger.TxBegin(t.Uuid)
+	}
+}
+
+// markTxFinish records on the ledger that execution of transaction t has
+// ended, with the given success flag. Queries are read-only and are skipped.
+func markTxFinish(ledger *ledger.Ledger, t *pb.Transaction, successful bool) {
+	if t.Type != pb.Transaction_CHAINCODE_QUERY {
+		ledger.TxFinished(t.Uuid, successful)
+	}
+}
+
+// sendTxRejectedEvent publishes a rejection event for tx carrying the given
+// error message to the event producer.
+func sendTxRejectedEvent(tx *pb.Transaction, errorMsg string) {
+	rejection := producer.CreateRejectionEvent(tx, errorMsg)
+	producer.Send(rejection)
+}
diff --git a/core/chaincode/exectransaction_test.go b/core/chaincode/exectransaction_test.go
new file mode 100644
index 00000000000..43a33b0a4da
--- /dev/null
+++ b/core/chaincode/exectransaction_test.go
@@ -0,0 +1,1439 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chaincode
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "path/filepath"
+
+ "github.com/hyperledger/fabric/core/container"
+ "github.com/hyperledger/fabric/core/container/ccintf"
+ "github.com/hyperledger/fabric/core/crypto"
+ "github.com/hyperledger/fabric/core/ledger"
+ "github.com/hyperledger/fabric/core/util"
+ "github.com/hyperledger/fabric/membersrvc/ca"
+ pb "github.com/hyperledger/fabric/protos"
+ "github.com/spf13/viper"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
+)
+
+// attributes to request in the batch of tcerts while deploying, invoking or querying
+var attributes = []string{"company", "position"}
+
+// getNowMillis returns the current wall-clock time as milliseconds since the
+// Unix epoch.
+func getNowMillis() int64 {
+	// int64(time.Millisecond) == 1e6 nanoseconds, same divisor as before.
+	return time.Now().UnixNano() / int64(time.Millisecond)
+}
+
+//initialize memberservices and startup
+// Starts ACA/ECA/TCA/TLSCA on a fresh gRPC server listening on the
+// "server.port" address from config, serving in a background goroutine.
+// Returns the listener so the caller can shut it down via finitMemSrvc.
+func initMemSrvc() (net.Listener, error) {
+	//start clean
+	finitMemSrvc(nil)
+
+	ca.LogInit(ioutil.Discard, os.Stdout, os.Stdout, os.Stderr, os.Stdout)
+	ca.CacheConfiguration() // Cache configuration
+
+	// The ECA is shared by the transaction and TLS CAs.
+	aca := ca.NewACA()
+	eca := ca.NewECA()
+	tca := ca.NewTCA(eca)
+	tlsca := ca.NewTLSCA(eca)
+
+	sockp, err := net.Listen("tcp", viper.GetString("server.port"))
+	if err != nil {
+		return nil, err
+	}
+
+	var opts []grpc.ServerOption
+	server := grpc.NewServer(opts...)
+
+	aca.Start(server)
+	eca.Start(server)
+	tca.Start(server)
+	tlsca.Start(server)
+
+	// Serve in the background; any Serve error is deliberately dropped here
+	// (test helper) and surfaces as RPC failures in the test instead.
+	go server.Serve(sockp)
+
+	return sockp, nil
+}
+
+// finitMemSrvc tears down the member-services listener (nil is allowed) and
+// removes the CA state directory left under the OS temp dir.
+func finitMemSrvc(lis net.Listener) {
+	closeListenerAndSleep(lis)
+	caDir := filepath.Join(os.TempDir(), "ca")
+	os.RemoveAll(caDir)
+}
+
+//initialize peer and start up. If security==enabled, login as vp
+// Starts a gRPC ChaincodeSupport server on "peer.address" (with TLS if
+// "peer.tls.enabled"), registering/initializing a validator identity when
+// "security.enabled" is set. Returns the listener for later teardown.
+func initPeer() (net.Listener, error) {
+	//start clean
+	finitPeer(nil)
+	var opts []grpc.ServerOption
+	if viper.GetBool("peer.tls.enabled") {
+		creds, err := credentials.NewServerTLSFromFile(viper.GetString("peer.tls.cert.file"), viper.GetString("peer.tls.key.file"))
+		if err != nil {
+			return nil, fmt.Errorf("Failed to generate credentials %v", err)
+		}
+		opts = []grpc.ServerOption{grpc.Creds(creds)}
+	}
+	grpcServer := grpc.NewServer(opts...)
+
+	peerAddress := viper.GetString("peer.address")
+	lis, err := net.Listen("tcp", peerAddress)
+	if err != nil {
+		return nil, fmt.Errorf("Error starting peer listener %s", err)
+	}
+
+	// Endpoint callback handed to ChaincodeSupport; static for tests.
+	getPeerEndpoint := func() (*pb.PeerEndpoint, error) {
+		return &pb.PeerEndpoint{ID: &pb.PeerID{Name: "testpeer"}, Address: peerAddress}, nil
+	}
+
+	// Install security object for peer
+	var secHelper crypto.Peer
+	if viper.GetBool("security.enabled") {
+		// One-time enrollment with the member services started by initMemSrvc.
+		enrollID := viper.GetString("security.enrollID")
+		enrollSecret := viper.GetString("security.enrollSecret")
+		if err = crypto.RegisterValidator(enrollID, nil, enrollID, enrollSecret); nil != err {
+			return nil, err
+		}
+		secHelper, err = crypto.InitValidator(enrollID, nil)
+		if nil != err {
+			return nil, err
+		}
+	}
+
+	ccStartupTimeout := time.Duration(chaincodeStartupTimeoutDefault) * time.Millisecond
+	pb.RegisterChaincodeSupportServer(grpcServer, NewChaincodeSupport(DefaultChain, getPeerEndpoint, false, ccStartupTimeout, secHelper))
+
+	// Serve in the background; errors surface as RPC failures in the tests.
+	go grpcServer.Serve(lis)
+
+	return lis, nil
+}
+
+// finitPeer shuts down the peer listener (nil is allowed) and clears the
+// peer's state directory left under the OS temp dir.
+func finitPeer(lis net.Listener) {
+	closeListenerAndSleep(lis)
+	stateDir := filepath.Join(os.TempDir(), "hyperledger")
+	os.RemoveAll(stateDir)
+}
+
+// Build a chaincode.
+// getDeploymentSpec builds the chaincode described by spec and wraps the
+// resulting code package in a ChaincodeDeploymentSpec.
+func getDeploymentSpec(ctx context.Context, spec *pb.ChaincodeSpec) (*pb.ChaincodeDeploymentSpec, error) {
+	fmt.Printf("getting deployment spec for chaincode spec: %v\n", spec)
+	codePackageBytes, err := container.GetChaincodePackageBytes(spec)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec, CodePackage: codePackageBytes}, nil
+}
+
+// createDeployTransaction wraps a deployment spec in a Transaction. When the
+// spec carries a SecureContext, the transaction is created (and signed)
+// through the crypto client for that enrollment identity; otherwise the
+// plain protobuf helper is used.
+func createDeployTransaction(dspec *pb.ChaincodeDeploymentSpec, uuid string) (*pb.Transaction, error) {
+	var tx *pb.Transaction
+	var err error
+	var sec crypto.Client
+	if dspec.ChaincodeSpec.SecureContext != "" {
+		sec, err = crypto.InitClient(dspec.ChaincodeSpec.SecureContext, nil)
+		// FIX: only register the cleanup after a successful init; previously
+		// CloseClient was deferred before the error check, so it also ran on
+		// a nil client when InitClient failed.
+		if nil != err {
+			return nil, err
+		}
+		defer crypto.CloseClient(sec)
+
+		tx, err = sec.NewChaincodeDeployTransaction(dspec, uuid, attributes...)
+		if nil != err {
+			return nil, err
+		}
+	} else {
+		tx, err = pb.NewChaincodeDeployTransaction(dspec, uuid)
+		if err != nil {
+			return nil, fmt.Errorf("Error deploying chaincode: %s ", err)
+		}
+	}
+	return tx, nil
+}
+
+// createTransaction wraps an invocation spec in an invoke or query
+// Transaction. When the spec carries a SecureContext the transaction is
+// created (and signed) through the crypto client; otherwise the plain
+// protobuf helper is used.
+func createTransaction(invokeTx bool, spec *pb.ChaincodeInvocationSpec, uuid string) (*pb.Transaction, error) {
+	var tx *pb.Transaction
+	var err error
+	var sec crypto.Client
+	// FIX: the secured path was previously guarded by "if nil != sec" on a
+	// freshly declared (always nil) variable, making it unreachable dead
+	// code. Gate on the SecureContext instead, mirroring
+	// createDeployTransaction.
+	if spec.ChaincodeSpec.SecureContext != "" {
+		sec, err = crypto.InitClient(spec.ChaincodeSpec.SecureContext, nil)
+		if nil != err {
+			return nil, err
+		}
+		defer crypto.CloseClient(sec)
+		if invokeTx {
+			tx, err = sec.NewChaincodeExecute(spec, uuid, attributes...)
+		} else {
+			tx, err = sec.NewChaincodeQuery(spec, uuid, attributes...)
+		}
+		if nil != err {
+			return nil, err
+		}
+	} else {
+		var t pb.Transaction_Type
+		if invokeTx {
+			t = pb.Transaction_CHAINCODE_INVOKE
+		} else {
+			t = pb.Transaction_CHAINCODE_QUERY
+		}
+		tx, err = pb.NewChaincodeExecute(spec, uuid, t)
+		if nil != err {
+			return nil, err
+		}
+	}
+	return tx, nil
+}
+
+// Deploy a chaincode - i.e., build and initialize.
+// Builds the code package for spec, wraps it in a deploy transaction, and
+// executes/commits it inside a single-transaction batch on the ledger.
+// Returns the Execute payload.
+func deploy(ctx context.Context, spec *pb.ChaincodeSpec) ([]byte, error) {
+	// First build and get the deployment spec
+	chaincodeDeploymentSpec, err := getDeploymentSpec(ctx, spec)
+	if err != nil {
+		return nil, err
+	}
+
+	// The chaincode name doubles as the transaction id for deploys.
+	tid := chaincodeDeploymentSpec.ChaincodeSpec.ChaincodeID.Name
+
+	// Now create the Transactions message and send to Peer.
+	transaction, err := createDeployTransaction(chaincodeDeploymentSpec, tid)
+	if err != nil {
+		return nil, fmt.Errorf("Error deploying chaincode: %s ", err)
+	}
+
+	ledger, err := ledger.GetLedger()
+	if err != nil {
+		return nil, fmt.Errorf("Failed to get handle to ledger: %s ", err)
+	}
+	// Single-transaction batch: begin, execute, commit.
+	ledger.BeginTxBatch("1")
+	b, _, err := Execute(ctx, GetChain(DefaultChain), transaction)
+	if err != nil {
+		return nil, fmt.Errorf("Error deploying chaincode: %s", err)
+	}
+	ledger.CommitTxBatch("1", []*pb.Transaction{transaction}, nil, nil)
+
+	return b, err
+}
+
+// deploy2 deploys an already-built ChaincodeDeploymentSpec: it creates the
+// deploy transaction, executes it inside a single-transaction batch, and
+// commits the batch. Returns the Execute payload.
+func deploy2(ctx context.Context, chaincodeDeploymentSpec *pb.ChaincodeDeploymentSpec) ([]byte, error) {
+	tid := chaincodeDeploymentSpec.ChaincodeSpec.ChaincodeID.Name
+
+	// Now create the Transactions message and send to Peer.
+	transaction, err := createDeployTransaction(chaincodeDeploymentSpec, tid)
+	if err != nil {
+		return nil, fmt.Errorf("Error deploying chaincode: %s ", err)
+	}
+
+	ledger, err := ledger.GetLedger()
+	// FIX: the error from GetLedger was previously ignored, risking a nil
+	// dereference on BeginTxBatch below.
+	if err != nil {
+		return nil, fmt.Errorf("Failed to get handle to ledger: %s ", err)
+	}
+	ledger.BeginTxBatch("1")
+	b, _, err := Execute(ctx, GetChain(DefaultChain), transaction)
+	if err != nil {
+		return nil, fmt.Errorf("Error deploying chaincode: %s", err)
+	}
+	ledger.CommitTxBatch("1", []*pb.Transaction{transaction}, nil, nil)
+
+	return b, err
+}
+
+// Invoke or query a chaincode.
+// Wraps spec in an invoke or query transaction (per typ), executes it —
+// invokes inside a committed single-transaction batch, queries standalone —
+// and returns the chaincode event, the generated uuid, the payload, and any
+// execution error.
+func invoke(ctx context.Context, spec *pb.ChaincodeSpec, typ pb.Transaction_Type) (*pb.ChaincodeEvent, string, []byte, error) {
+	chaincodeInvocationSpec := &pb.ChaincodeInvocationSpec{ChaincodeSpec: spec}
+
+	// Now create the Transactions message and send to Peer.
+	uuid := util.GenerateUUID()
+
+	var transaction *pb.Transaction
+	var err error
+	if typ == pb.Transaction_CHAINCODE_QUERY {
+		transaction, err = createTransaction(false, chaincodeInvocationSpec, uuid)
+	} else {
+		transaction, err = createTransaction(true, chaincodeInvocationSpec, uuid)
+	}
+	if err != nil {
+		return nil, uuid, nil, fmt.Errorf("Error invoking chaincode: %s ", err)
+	}
+
+	var retval []byte
+	var execErr error
+	var ccevt *pb.ChaincodeEvent
+	if typ == pb.Transaction_CHAINCODE_QUERY {
+		// Queries are read-only and run outside a transaction batch.
+		retval, ccevt, execErr = Execute(ctx, GetChain(DefaultChain), transaction)
+	} else {
+		ledger, _ := ledger.GetLedger()
+		ledger.BeginTxBatch("1")
+		retval, ccevt, execErr = Execute(ctx, GetChain(DefaultChain), transaction)
+		// FIX: this previously re-tested the stale outer err (always nil at
+		// this point) instead of execErr, so failed invokes were committed
+		// anyway.
+		if execErr != nil {
+			return nil, uuid, nil, fmt.Errorf("Error invoking chaincode: %s ", execErr)
+		}
+		ledger.CommitTxBatch("1", []*pb.Transaction{transaction}, nil, nil)
+	}
+
+	return ccevt, uuid, retval, execErr
+}
+
+// closeListenerAndSleep closes l (nil is a no-op) and pauses briefly so the
+// OS can release the port before a subsequent bind in the next test.
+func closeListenerAndSleep(l net.Listener) {
+	if l == nil {
+		return
+	}
+	l.Close()
+	time.Sleep(2 * time.Second)
+}
+
+// executeDeployTransaction spins up a throwaway ChaincodeSupport gRPC server
+// on a fixed address, deploys the chaincode at url through it, and tears
+// everything down. Failures are reported via t.
+func executeDeployTransaction(t *testing.T, url string) {
+	var opts []grpc.ServerOption
+	if viper.GetBool("peer.tls.enabled") {
+		creds, err := credentials.NewServerTLSFromFile(viper.GetString("peer.tls.cert.file"), viper.GetString("peer.tls.key.file"))
+		if err != nil {
+			grpclog.Fatalf("Failed to generate credentials %v", err)
+		}
+		opts = []grpc.ServerOption{grpc.Creds(creds)}
+	}
+	grpcServer := grpc.NewServer(opts...)
+	viper.Set("peer.fileSystemPath", "/var/hyperledger/test/tmpdb")
+
+	//lis, err := net.Listen("tcp", viper.GetString("peer.address"))
+
+	//use a different address than what we usually use for "peer"
+	//we override the peerAddress set in chaincode_support.go
+	peerAddress := "0.0.0.0:21212"
+	lis, err := net.Listen("tcp", peerAddress)
+	if err != nil {
+		t.Fail()
+		t.Logf("Error starting peer listener %s", err)
+		return
+	}
+
+	getPeerEndpoint := func() (*pb.PeerEndpoint, error) {
+		return &pb.PeerEndpoint{ID: &pb.PeerID{Name: "testpeer"}, Address: peerAddress}, nil
+	}
+
+	ccStartupTimeout := time.Duration(chaincodeStartupTimeoutDefault) * time.Millisecond
+	pb.RegisterChaincodeSupportServer(grpcServer, NewChaincodeSupport(DefaultChain, getPeerEndpoint, false, ccStartupTimeout, nil))
+
+	go grpcServer.Serve(lis)
+
+	var ctxt = context.Background()
+
+	// Deploy with a simple init that seeds two accounts.
+	f := "init"
+	args := []string{"a", "100", "b", "200"}
+	spec := &pb.ChaincodeSpec{Type: 1, ChaincodeID: &pb.ChaincodeID{Path: url}, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+	_, err = deploy(ctxt, spec)
+	chaincodeID := spec.ChaincodeID.Name
+	if err != nil {
+		// Stop the chaincode container before reporting the failure so the
+		// next test starts from a clean slate.
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
+		closeListenerAndSleep(lis)
+		t.Fail()
+		t.Logf("Error deploying <%s>: %s", chaincodeID, err)
+		return
+	}
+
+	GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
+	closeListenerAndSleep(lis)
+}
+
+// Test deploy of a transaction
+func TestExecuteDeployTransaction(t *testing.T) {
+	// Deploys a local example chaincode resolved through GOPATH.
+	executeDeployTransaction(t, "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example01")
+}
+
+// Test deploy of a transaction with a GOPATH with multiple elements.
+func TestGopathExecuteDeployTransaction(t *testing.T) {
+	// add a trailing slash to GOPATH
+	// and a couple of elements - it doesn't matter what they are.
+	// Restore the original value afterwards so later tests (which also
+	// resolve chaincode through GOPATH) are not affected.
+	origGopath := os.Getenv("GOPATH")
+	defer os.Setenv("GOPATH", origGopath)
+	os.Setenv("GOPATH", origGopath+string(os.PathSeparator)+string(os.PathListSeparator)+"/tmp/foo"+string(os.PathListSeparator)+"/tmp/bar")
+	fmt.Printf("set GOPATH to: \"%s\"\n", os.Getenv("GOPATH"))
+	executeDeployTransaction(t, "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example01")
+}
+
+// Test deploy of a transaction with a chaincode fetched over HTTP.
+func TestHTTPExecuteDeployTransaction(t *testing.T) {
+	// The chaincode used here cannot be from the fabric repo itself, or it
+	// won't be downloaded because it would already be found in GOPATH -
+	// which would defeat the purpose of the test.
+	chaincodeURL := "http://github.com/hyperledger/fabric-test-resources/examples/chaincode/go/chaincode_example01"
+	executeDeployTransaction(t, chaincodeURL)
+}
+
+// checkFinalState verifies the ledger state left behind by an invocation of
+// chaincode_example02: "a" must be 90 and "b" must be 210.
+// The uuid parameter is currently unused; it is kept for call-site
+// compatibility.
+func checkFinalState(uuid string, chaincodeID string) error {
+	// Check the state in the ledger
+	ledgerObj, ledgerErr := ledger.GetLedger()
+	if ledgerErr != nil {
+		return fmt.Errorf("Error checking ledger for <%s>: %s", chaincodeID, ledgerErr)
+	}
+
+	// Invoke ledger to get state
+	var Aval, Bval int
+	resbytes, resErr := ledgerObj.GetState(chaincodeID, "a", false)
+	if resErr != nil {
+		return fmt.Errorf("Error retrieving state from ledger for <%s>: %s", chaincodeID, resErr)
+	}
+	fmt.Printf("Got string: %s\n", string(resbytes))
+	Aval, resErr = strconv.Atoi(string(resbytes))
+	if resErr != nil {
+		// A parse failure is distinct from a retrieval failure - say so.
+		return fmt.Errorf("Error parsing state value for <%s>: %s", chaincodeID, resErr)
+	}
+	if Aval != 90 {
+		return fmt.Errorf("Incorrect result. Aval %d != 90 for <%s>", Aval, chaincodeID)
+	}
+
+	resbytes, resErr = ledgerObj.GetState(chaincodeID, "b", false)
+	if resErr != nil {
+		return fmt.Errorf("Error retrieving state from ledger for <%s>: %s", chaincodeID, resErr)
+	}
+	Bval, resErr = strconv.Atoi(string(resbytes))
+	if resErr != nil {
+		return fmt.Errorf("Error parsing state value for <%s>: %s", chaincodeID, resErr)
+	}
+	if Bval != 210 {
+		return fmt.Errorf("Incorrect result. Bval %d != 210 for <%s>", Bval, chaincodeID)
+	}
+
+	// Success
+	fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval)
+	return nil
+}
+
+// Invoke chaincode_example02: deploy it with a=100/b=200, optionally stop
+// it and destroy its container image (forcing the next invoke to rebuild),
+// invoke it with the caller-supplied args, verify the resulting ledger
+// state via checkFinalState (a=90/b=210), and finally exercise the
+// "delete" function.
+// NOTE(review): the effect of the "delete" invoke is not verified here -
+// only that the transaction itself did not error.
+func invokeExample02Transaction(ctxt context.Context, cID *pb.ChaincodeID, args []string, destroyImage bool) error {
+
+	f := "init"
+	argsDeploy := []string{"a", "100", "b", "200"}
+	spec := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Function: f, Args: argsDeploy}}
+	_, err := deploy(ctxt, spec)
+	chaincodeID := spec.ChaincodeID.Name
+	if err != nil {
+		return fmt.Errorf("Error deploying <%s>: %s", chaincodeID, err)
+	}
+
+	// Give the freshly deployed chaincode a moment to settle before
+	// invoking it.
+	time.Sleep(time.Second)
+
+	if destroyImage {
+		// Stop the chaincode and force-remove its Docker image so the
+		// following invoke has to rebuild it from scratch.
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
+		dir := container.DestroyImageReq{CCID: ccintf.CCID{ChaincodeSpec: spec, NetworkID: GetChain(DefaultChain).peerNetworkID, PeerID: GetChain(DefaultChain).peerID}, Force: true, NoPrune: true}
+
+		_, err = container.VMCProcess(ctxt, container.DOCKER, dir)
+		if err != nil {
+			err = fmt.Errorf("Error destroying image: %s", err)
+			return err
+		}
+	}
+
+	f = "invoke"
+	spec = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+	_, uuid, _, err := invoke(ctxt, spec, pb.Transaction_CHAINCODE_INVOKE)
+	if err != nil {
+		return fmt.Errorf("Error invoking <%s>: %s", chaincodeID, err)
+	}
+
+	// Expect the canonical post-invoke state (a=90, b=210).
+	err = checkFinalState(uuid, chaincodeID)
+	if err != nil {
+		return fmt.Errorf("Incorrect final state after transaction for <%s>: %s", chaincodeID, err)
+	}
+
+	// Test for delete state
+	f = "delete"
+	delArgs := []string{"a"}
+	spec = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Function: f, Args: delArgs}}
+	_, uuid, _, err = invoke(ctxt, spec, pb.Transaction_CHAINCODE_INVOKE)
+	if err != nil {
+		return fmt.Errorf("Error deleting state in <%s>: %s", chaincodeID, err)
+	}
+
+	return nil
+}
+
+// TestExecuteInvokeTransaction deploys chaincode_example02 (destroying the
+// container image in between to force a rebuild) and invokes it, checking
+// the final ledger state.  This is the ONLY test in the file that runs
+// without TLS, and the only one that turns chaincode keepalive off.
+func TestExecuteInvokeTransaction(t *testing.T) {
+	var opts []grpc.ServerOption
+
+	//TLS is on by default. This is the ONLY test that does NOT use TLS
+	viper.Set("peer.tls.enabled", false)
+
+	//turn OFF keepalive. All other tests use keepalive
+	viper.Set("peer.chaincode.keepalive", "0")
+
+	// Dead branch in this particular test: peer.tls.enabled was just
+	// forced to false above; kept for symmetry with the other tests.
+	if viper.GetBool("peer.tls.enabled") {
+		creds, err := credentials.NewServerTLSFromFile(viper.GetString("peer.tls.cert.file"), viper.GetString("peer.tls.key.file"))
+		if err != nil {
+			grpclog.Fatalf("Failed to generate credentials %v", err)
+		}
+		opts = []grpc.ServerOption{grpc.Creds(creds)}
+	}
+	grpcServer := grpc.NewServer(opts...)
+	viper.Set("peer.fileSystemPath", "/var/hyperledger/test/tmpdb")
+
+	//use a different address than what we usually use for "peer"
+	//we override the peerAddress set in chaincode_support.go
+	peerAddress := "0.0.0.0:21212"
+
+	lis, err := net.Listen("tcp", peerAddress)
+	if err != nil {
+		t.Fail()
+		t.Logf("Error starting peer listener %s", err)
+		return
+	}
+
+	getPeerEndpoint := func() (*pb.PeerEndpoint, error) {
+		return &pb.PeerEndpoint{ID: &pb.PeerID{Name: "testpeer"}, Address: peerAddress}, nil
+	}
+
+	ccStartupTimeout := time.Duration(chaincodeStartupTimeoutDefault) * time.Millisecond
+	pb.RegisterChaincodeSupportServer(grpcServer, NewChaincodeSupport(DefaultChain, getPeerEndpoint, false, ccStartupTimeout, nil))
+
+	// Serve in the background; closeListenerAndSleep below shuts it down.
+	go grpcServer.Serve(lis)
+
+	var ctxt = context.Background()
+
+	url := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
+	chaincodeID := &pb.ChaincodeID{Path: url}
+
+	// destroyImage=true forces the chaincode image to be rebuilt for the
+	// invoke; the expected final state is checked inside
+	// invokeExample02Transaction (a=90, b=210).
+	args := []string{"a", "b", "10"}
+	err = invokeExample02Transaction(ctxt, chaincodeID, args, true)
+	if err != nil {
+		t.Fail()
+		t.Logf("Error invoking transaction: %s", err)
+	} else {
+		fmt.Printf("Invoke test passed\n")
+		t.Logf("Invoke test passed")
+	}
+
+	GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeID: chaincodeID}})
+
+	closeListenerAndSleep(lis)
+}
+
+// exec runs numTrans invoke transactions (sequentially, inside a single
+// background goroutine) and numQueries queries (each in its own goroutine,
+// i.e. in parallel) against the named chaincode.  The returned slice has
+// one entry per operation: indices [0,numTrans) are the invokes, indices
+// [numTrans,numTrans+numQueries) are the queries; a nil entry means that
+// operation succeeded.
+func exec(ctxt context.Context, chaincodeID string, numTrans int, numQueries int) []error {
+	var wg sync.WaitGroup
+	errs := make([]error, numTrans+numQueries)
+
+	// e performs one operation and records any failure in its own slot of
+	// errs.  Each goroutine writes a distinct index, so no locking is
+	// needed on the slice.
+	e := func(qnum int, typ pb.Transaction_Type) {
+		defer wg.Done()
+		var spec *pb.ChaincodeSpec
+		if typ == pb.Transaction_CHAINCODE_INVOKE {
+			f := "invoke"
+			args := []string{"a", "b", "10"}
+
+			spec = &pb.ChaincodeSpec{Type: 1, ChaincodeID: &pb.ChaincodeID{Name: chaincodeID}, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+		} else {
+			f := "query"
+			args := []string{"a"}
+
+			spec = &pb.ChaincodeSpec{Type: 1, ChaincodeID: &pb.ChaincodeID{Name: chaincodeID}, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+		}
+
+		_, _, _, err := invoke(ctxt, spec, typ)
+
+		if err != nil {
+			errs[qnum] = fmt.Errorf("Error executing <%s>: %s", chaincodeID, err)
+			return
+		}
+	}
+	// Register all completions up front, before any goroutine can finish.
+	wg.Add(numTrans + numQueries)
+
+	//execute transactions sequentially..
+	go func() {
+		for i := 0; i < numTrans; i++ {
+			e(i, pb.Transaction_CHAINCODE_INVOKE)
+		}
+	}()
+
+	//...but queries in parallel
+	for i := numTrans; i < numTrans+numQueries; i++ {
+		go e(i, pb.Transaction_CHAINCODE_QUERY)
+	}
+
+	wg.Wait()
+	return errs
+}
+
+// Test the execution of a query: deploy example02, then run 2 invokes
+// (sequentially) concurrently with 10 queries (in parallel) via exec and
+// require that every operation succeeds.
+func TestExecuteQuery(t *testing.T) {
+	var opts []grpc.ServerOption
+	if viper.GetBool("peer.tls.enabled") {
+		creds, err := credentials.NewServerTLSFromFile(viper.GetString("peer.tls.cert.file"), viper.GetString("peer.tls.key.file"))
+		if err != nil {
+			grpclog.Fatalf("Failed to generate credentials %v", err)
+		}
+		opts = []grpc.ServerOption{grpc.Creds(creds)}
+	}
+	grpcServer := grpc.NewServer(opts...)
+	viper.Set("peer.fileSystemPath", "/var/hyperledger/test/tmpdb")
+
+	//use a different address than what we usually use for "peer"
+	//we override the peerAddress set in chaincode_support.go
+	peerAddress := "0.0.0.0:21212"
+
+	lis, err := net.Listen("tcp", peerAddress)
+	if err != nil {
+		t.Fail()
+		t.Logf("Error starting peer listener %s", err)
+		return
+	}
+
+	getPeerEndpoint := func() (*pb.PeerEndpoint, error) {
+		return &pb.PeerEndpoint{ID: &pb.PeerID{Name: "testpeer"}, Address: peerAddress}, nil
+	}
+
+	ccStartupTimeout := time.Duration(chaincodeStartupTimeoutDefault) * time.Millisecond
+	pb.RegisterChaincodeSupportServer(grpcServer, NewChaincodeSupport(DefaultChain, getPeerEndpoint, false, ccStartupTimeout, nil))
+
+	go grpcServer.Serve(lis)
+
+	var ctxt = context.Background()
+
+	url := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
+
+	cID := &pb.ChaincodeID{Path: url}
+	f := "init"
+	args := []string{"a", "100", "b", "200"}
+
+	spec := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+
+	_, err = deploy(ctxt, spec)
+	chaincodeID := spec.ChaincodeID.Name
+	if err != nil {
+		t.Fail()
+		t.Logf("Error initializing chaincode %s(%s)", chaincodeID, err)
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
+		closeListenerAndSleep(lis)
+		return
+	}
+
+	// Let the deployed chaincode settle before hitting it concurrently.
+	time.Sleep(2 * time.Second)
+
+	//start := getNowMillis()
+	//fmt.Fprintf(os.Stderr, "Starting: %d\n", start)
+	numTrans := 2
+	numQueries := 10
+	errs := exec(ctxt, chaincodeID, numTrans, numQueries)
+
+	// Count failures across all operations; any failure fails the test.
+	var numerrs int
+	for i := 0; i < numTrans+numQueries; i++ {
+		if errs[i] != nil {
+			t.Logf("Error doing query on %d %s", i, errs[i])
+			numerrs++
+		}
+	}
+
+	if numerrs == 0 {
+		t.Logf("Query test passed")
+	} else {
+		t.Logf("Query test failed(total errors %d)", numerrs)
+		t.Fail()
+	}
+
+	//end := getNowMillis()
+	//fmt.Fprintf(os.Stderr, "Ending: %d\n", end)
+	//fmt.Fprintf(os.Stderr, "Elapsed : %d millis\n", end-start)
+	GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
+	closeListenerAndSleep(lis)
+}
+
+// Test the execution of an invalid transaction: invoking example02 with
+// malformed arguments ({"x", "-1"}) MUST produce an error.
+func TestExecuteInvokeInvalidTransaction(t *testing.T) {
+	var opts []grpc.ServerOption
+	if viper.GetBool("peer.tls.enabled") {
+		creds, err := credentials.NewServerTLSFromFile(viper.GetString("peer.tls.cert.file"), viper.GetString("peer.tls.key.file"))
+		if err != nil {
+			grpclog.Fatalf("Failed to generate credentials %v", err)
+		}
+		opts = []grpc.ServerOption{grpc.Creds(creds)}
+	}
+	grpcServer := grpc.NewServer(opts...)
+	viper.Set("peer.fileSystemPath", "/var/hyperledger/test/tmpdb")
+
+	//use a different address than what we usually use for "peer"
+	//we override the peerAddress set in chaincode_support.go
+	peerAddress := "0.0.0.0:21212"
+
+	lis, err := net.Listen("tcp", peerAddress)
+	if err != nil {
+		t.Errorf("Error starting peer listener %s", err)
+		return
+	}
+
+	getPeerEndpoint := func() (*pb.PeerEndpoint, error) {
+		return &pb.PeerEndpoint{ID: &pb.PeerID{Name: "testpeer"}, Address: peerAddress}, nil
+	}
+
+	ccStartupTimeout := time.Duration(chaincodeStartupTimeoutDefault) * time.Millisecond
+	pb.RegisterChaincodeSupportServer(grpcServer, NewChaincodeSupport(DefaultChain, getPeerEndpoint, false, ccStartupTimeout, nil))
+
+	go grpcServer.Serve(lis)
+
+	var ctxt = context.Background()
+
+	url := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
+	chaincodeID := &pb.ChaincodeID{Path: url}
+
+	// Clean up on every exit path below; defers run LIFO, so the chaincode
+	// is stopped before the listener is closed (same order as before).
+	defer closeListenerAndSleep(lis)
+	defer GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeID: chaincodeID}})
+
+	//FAIL, FAIL!
+	args := []string{"x", "-1"}
+	err = invokeExample02Transaction(ctxt, chaincodeID, args, false)
+
+	//this HAS to fail with expectedDeltaStringPrefix
+	if err != nil {
+		t.Logf("Got error %s\n", err.Error())
+		t.Logf("InvalidInvoke test passed")
+		return
+	}
+
+	// err is nil here: the invalid transaction unexpectedly succeeded, so
+	// report that (the old code logged the nil error, which was misleading).
+	t.Errorf("Invoke of an invalid transaction was expected to fail but succeeded")
+}
+
+// Test the execution of an invalid query: chaincode_example03's "query"
+// attempts to put state, which must fail in query context.
+func TestExecuteInvalidQuery(t *testing.T) {
+	var opts []grpc.ServerOption
+	if viper.GetBool("peer.tls.enabled") {
+		creds, err := credentials.NewServerTLSFromFile(viper.GetString("peer.tls.cert.file"), viper.GetString("peer.tls.key.file"))
+		if err != nil {
+			grpclog.Fatalf("Failed to generate credentials %v", err)
+		}
+		opts = []grpc.ServerOption{grpc.Creds(creds)}
+	}
+	grpcServer := grpc.NewServer(opts...)
+	viper.Set("peer.fileSystemPath", "/var/hyperledger/test/tmpdb")
+
+	//use a different address than what we usually use for "peer"
+	//we override the peerAddress set in chaincode_support.go
+	peerAddress := "0.0.0.0:21212"
+
+	lis, err := net.Listen("tcp", peerAddress)
+	if err != nil {
+		t.Fail()
+		t.Logf("Error starting peer listener %s", err)
+		return
+	}
+
+	getPeerEndpoint := func() (*pb.PeerEndpoint, error) {
+		return &pb.PeerEndpoint{ID: &pb.PeerID{Name: "testpeer"}, Address: peerAddress}, nil
+	}
+
+	ccStartupTimeout := time.Duration(chaincodeStartupTimeoutDefault) * time.Millisecond
+	pb.RegisterChaincodeSupportServer(grpcServer, NewChaincodeSupport(DefaultChain, getPeerEndpoint, false, ccStartupTimeout, nil))
+
+	go grpcServer.Serve(lis)
+
+	var ctxt = context.Background()
+
+	url := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example03"
+
+	cID := &pb.ChaincodeID{Path: url}
+	f := "init"
+	args := []string{"a", "100"}
+
+	spec := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+
+	_, err = deploy(ctxt, spec)
+	chaincodeID := spec.ChaincodeID.Name
+	if err != nil {
+		t.Fail()
+		t.Logf("Error initializing chaincode %s(%s)", chaincodeID, err)
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
+		closeListenerAndSleep(lis)
+		return
+	}
+
+	// Give the deployed chaincode a moment before querying it.
+	time.Sleep(time.Second)
+
+	f = "query"
+	args = []string{"b", "200"}
+
+	spec = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+	// This query should fail as it attempts to put state
+	_, _, _, err = invoke(ctxt, spec, pb.Transaction_CHAINCODE_QUERY)
+
+	if err == nil {
+		t.Fail()
+		t.Logf("This query should not have succeeded as it attempts to put state")
+	}
+
+	GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
+	closeListenerAndSleep(lis)
+}
+
+// Test the execution of a chaincode that invokes another chaincode:
+// deploy example02 and example04, invoke example04 (which in turn invokes
+// example02), then verify example02's final ledger state via
+// checkFinalState (a=90, b=210).
+func TestChaincodeInvokeChaincode(t *testing.T) {
+	var opts []grpc.ServerOption
+	if viper.GetBool("peer.tls.enabled") {
+		creds, err := credentials.NewServerTLSFromFile(viper.GetString("peer.tls.cert.file"), viper.GetString("peer.tls.key.file"))
+		if err != nil {
+			grpclog.Fatalf("Failed to generate credentials %v", err)
+		}
+		opts = []grpc.ServerOption{grpc.Creds(creds)}
+	}
+	grpcServer := grpc.NewServer(opts...)
+	viper.Set("peer.fileSystemPath", "/var/hyperledger/test/tmpdb")
+
+	//use a different address than what we usually use for "peer"
+	//we override the peerAddress set in chaincode_support.go
+	peerAddress := "0.0.0.0:21212"
+
+	lis, err := net.Listen("tcp", peerAddress)
+	if err != nil {
+		t.Fail()
+		t.Logf("Error starting peer listener %s", err)
+		return
+	}
+
+	getPeerEndpoint := func() (*pb.PeerEndpoint, error) {
+		return &pb.PeerEndpoint{ID: &pb.PeerID{Name: "testpeer"}, Address: peerAddress}, nil
+	}
+
+	ccStartupTimeout := time.Duration(chaincodeStartupTimeoutDefault) * time.Millisecond
+	pb.RegisterChaincodeSupportServer(grpcServer, NewChaincodeSupport(DefaultChain, getPeerEndpoint, false, ccStartupTimeout, nil))
+
+	go grpcServer.Serve(lis)
+
+	var ctxt = context.Background()
+
+	// Deploy first chaincode
+	url1 := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
+
+	cID1 := &pb.ChaincodeID{Path: url1}
+	f := "init"
+	args := []string{"a", "100", "b", "200"}
+
+	spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID1, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+
+	_, err = deploy(ctxt, spec1)
+	chaincodeID1 := spec1.ChaincodeID.Name
+	if err != nil {
+		t.Fail()
+		t.Logf("Error initializing chaincode %s(%s)", chaincodeID1, err)
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
+		closeListenerAndSleep(lis)
+		return
+	}
+
+	time.Sleep(time.Second)
+
+	// Deploy second chaincode
+	url2 := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example04"
+
+	cID2 := &pb.ChaincodeID{Path: url2}
+	f = "init"
+	args = []string{"e", "0"}
+
+	spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+
+	_, err = deploy(ctxt, spec2)
+	chaincodeID2 := spec2.ChaincodeID.Name
+	if err != nil {
+		t.Fail()
+		t.Logf("Error initializing chaincode %s(%s)", chaincodeID2, err)
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
+		closeListenerAndSleep(lis)
+		return
+	}
+
+	time.Sleep(time.Second)
+
+	// Invoke second chaincode, which will inturn invoke the first chaincode
+	f = "invoke"
+	args = []string{"e", "1"}
+
+	spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+	// Invoke chaincode
+	var uuid string
+	_, uuid, _, err = invoke(ctxt, spec2, pb.Transaction_CHAINCODE_INVOKE)
+
+	if err != nil {
+		t.Fail()
+		t.Logf("Error invoking <%s>: %s", chaincodeID2, err)
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
+		closeListenerAndSleep(lis)
+		return
+	}
+
+	// Check the state in the ledger: it is the FIRST chaincode's state
+	// that must have changed as a result of the nested invoke.
+	err = checkFinalState(uuid, chaincodeID1)
+	if err != nil {
+		t.Fail()
+		t.Logf("Incorrect final state after transaction for <%s>: %s", chaincodeID1, err)
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
+		closeListenerAndSleep(lis)
+		return
+	}
+
+	GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
+	GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
+	closeListenerAndSleep(lis)
+}
+
+// Test the execution of a chaincode that invokes another chaincode with
+// wrong parameters.  The error raised by the called chaincode (example02,
+// reached through the "passthru" chaincode) must propagate back to the
+// caller.
+func TestChaincodeInvokeChaincodeErrorCase(t *testing.T) {
+	var opts []grpc.ServerOption
+	if viper.GetBool("peer.tls.enabled") {
+		creds, err := credentials.NewServerTLSFromFile(viper.GetString("peer.tls.cert.file"), viper.GetString("peer.tls.key.file"))
+		if err != nil {
+			grpclog.Fatalf("Failed to generate credentials %v", err)
+		}
+		opts = []grpc.ServerOption{grpc.Creds(creds)}
+	}
+	grpcServer := grpc.NewServer(opts...)
+	viper.Set("peer.fileSystemPath", "/var/hyperledger/test/tmpdb")
+
+	//use a different address than what we usually use for "peer"
+	//we override the peerAddress set in chaincode_support.go
+	peerAddress := "0.0.0.0:21212"
+
+	lis, err := net.Listen("tcp", peerAddress)
+	if err != nil {
+		t.Errorf("Error starting peer listener %s", err)
+		return
+	}
+
+	getPeerEndpoint := func() (*pb.PeerEndpoint, error) {
+		return &pb.PeerEndpoint{ID: &pb.PeerID{Name: "testpeer"}, Address: peerAddress}, nil
+	}
+
+	ccStartupTimeout := time.Duration(chaincodeStartupTimeoutDefault) * time.Millisecond
+	pb.RegisterChaincodeSupportServer(grpcServer, NewChaincodeSupport(DefaultChain, getPeerEndpoint, false, ccStartupTimeout, nil))
+
+	go grpcServer.Serve(lis)
+
+	var ctxt = context.Background()
+
+	// Deploy first chaincode
+	url1 := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
+
+	cID1 := &pb.ChaincodeID{Path: url1}
+	f := "init"
+	args := []string{"a", "100", "b", "200"}
+
+	spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID1, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+
+	_, err = deploy(ctxt, spec1)
+	chaincodeID1 := spec1.ChaincodeID.Name
+	if err != nil {
+		t.Errorf("Error initializing chaincode %s(%s)", chaincodeID1, err)
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
+		closeListenerAndSleep(lis)
+		return
+	}
+
+	time.Sleep(time.Second)
+
+	// Deploy second chaincode
+	url2 := "github.com/hyperledger/fabric/examples/chaincode/go/passthru"
+
+	cID2 := &pb.ChaincodeID{Path: url2}
+	f = "init"
+	args = []string{""}
+
+	spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+
+	_, err = deploy(ctxt, spec2)
+	chaincodeID2 := spec2.ChaincodeID.Name
+
+	// From here on both chaincodes and the listener need tearing down on
+	// every exit path.  The closure reads spec2 at call time, so it stops
+	// whichever spec spec2 holds then - same as the old explicit calls.
+	cleanup := func() {
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
+		closeListenerAndSleep(lis)
+	}
+	defer cleanup()
+
+	if err != nil {
+		t.Errorf("Error initializing chaincode %s(%s)", chaincodeID2, err)
+		return
+	}
+
+	time.Sleep(time.Second)
+
+	// Invoke second chaincode, which will inturn invoke the first chaincode but pass bad params
+	f = chaincodeID1
+	args = []string{"invoke", "a"} //expect {"invoke", "a","b","10"}
+
+	spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+	// Invoke chaincode
+	_, _, _, err = invoke(ctxt, spec2, pb.Transaction_CHAINCODE_INVOKE)
+
+	if err == nil {
+		// err is nil here: the bad invoke unexpectedly succeeded (the old
+		// code logged the nil error, which was misleading).
+		t.Errorf("Invoke of <%s> with bad parameters was expected to fail but succeeded", chaincodeID2)
+		return
+	}
+
+	if !strings.Contains(err.Error(), "Incorrect number of arguments. Expecting 3") {
+		t.Errorf("Unexpected error %s", err)
+		return
+	}
+}
+
+// chaincodeQueryChaincode deploys chaincode_example02 (a=100, b=200) and
+// chaincode_example05, then has example05 first invoke and then query
+// example02, checking that both paths return the expected sum of 300.
+// user is the secure-context identity to run under ("" when security is
+// disabled).  Both chaincodes are stopped before returning.
+func chaincodeQueryChaincode(user string) error {
+	var ctxt = context.Background()
+
+	// Deploy first chaincode
+	url1 := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
+
+	cID1 := &pb.ChaincodeID{Path: url1}
+	f := "init"
+	args := []string{"a", "100", "b", "200"}
+
+	spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID1, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}, SecureContext: user}
+
+	_, err := deploy(ctxt, spec1)
+	chaincodeID1 := spec1.ChaincodeID.Name
+	if err != nil {
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
+		return fmt.Errorf("Error initializing chaincode %s(%s)", chaincodeID1, err)
+	}
+
+	time.Sleep(time.Second)
+
+	// Deploy second chaincode
+	url2 := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example05"
+
+	cID2 := &pb.ChaincodeID{Path: url2}
+	f = "init"
+	args = []string{"sum", "0"}
+
+	spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}, SecureContext: user}
+
+	_, err = deploy(ctxt, spec2)
+	chaincodeID2 := spec2.ChaincodeID.Name
+
+	// Both chaincodes must be stopped on every exit path from here on.
+	// The closure reads spec2 at call time, so it stops whichever spec
+	// spec2 holds then - matching the old explicit Stop calls.
+	stopAll := func() {
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
+	}
+	defer stopAll()
+
+	if err != nil {
+		return fmt.Errorf("Error initializing chaincode %s(%s)", chaincodeID2, err)
+	}
+
+	time.Sleep(time.Second)
+
+	// Invoke second chaincode, which will inturn query the first chaincode
+	f = "invoke"
+	args = []string{chaincodeID1, "sum"}
+
+	spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}, SecureContext: user}
+	// Invoke chaincode
+	var retVal []byte
+	_, _, retVal, err = invoke(ctxt, spec2, pb.Transaction_CHAINCODE_INVOKE)
+
+	if err != nil {
+		return fmt.Errorf("Error invoking <%s>: %s", chaincodeID2, err)
+	}
+
+	// Check the return value: a + b = 100 + 200 = 300
+	result, err := strconv.Atoi(string(retVal))
+	if err != nil || result != 300 {
+		// err may be nil here (Atoi succeeded but the sum was wrong), so
+		// include the actual result instead of formatting a nil error.
+		return fmt.Errorf("Incorrect final state after transaction for <%s>: result=%d err=%v", chaincodeID1, result, err)
+	}
+
+	// Query second chaincode, which will inturn query the first chaincode
+	f = "query"
+	args = []string{chaincodeID1, "sum"}
+
+	spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}, SecureContext: user}
+	// Invoke chaincode
+	_, _, retVal, err = invoke(ctxt, spec2, pb.Transaction_CHAINCODE_QUERY)
+
+	if err != nil {
+		return fmt.Errorf("Error querying <%s>: %s", chaincodeID2, err)
+	}
+
+	// Check the return value
+	result, err = strconv.Atoi(string(retVal))
+	if err != nil || result != 300 {
+		return fmt.Errorf("Incorrect final value after query for <%s>: result=%d err=%v", chaincodeID1, result, err)
+	}
+
+	return nil
+}
+
+// Test the execution of a chaincode query that queries another chaincode without security enabled
+func TestChaincodeQueryChaincode(t *testing.T) {
+ var peerLis net.Listener
+ var err error
+ if peerLis, err = initPeer(); err != nil {
+ t.Fail()
+ t.Logf("Error registering user %s", err)
+ return
+ }
+
+ if err = chaincodeQueryChaincode(""); err != nil {
+ finitPeer(peerLis)
+ t.Fail()
+ t.Logf("Error executing test %s", err)
+ return
+ }
+
+ finitPeer(peerLis)
+}
+
+// Test the execution of a chaincode that queries another chaincode with an
+// invalid parameter.  The error raised by the called chaincode (example02,
+// reached through the "passthru" chaincode) must propagate back to the
+// caller.
+func TestChaincodeQueryChaincodeErrorCase(t *testing.T) {
+	var opts []grpc.ServerOption
+	if viper.GetBool("peer.tls.enabled") {
+		creds, err := credentials.NewServerTLSFromFile(viper.GetString("peer.tls.cert.file"), viper.GetString("peer.tls.key.file"))
+		if err != nil {
+			grpclog.Fatalf("Failed to generate credentials %v", err)
+		}
+		opts = []grpc.ServerOption{grpc.Creds(creds)}
+	}
+	grpcServer := grpc.NewServer(opts...)
+	viper.Set("peer.fileSystemPath", "/var/hyperledger/test/tmpdb")
+
+	//use a different address than what we usually use for "peer"
+	//we override the peerAddress set in chaincode_support.go
+	peerAddress := "0.0.0.0:21212"
+
+	lis, err := net.Listen("tcp", peerAddress)
+	if err != nil {
+		t.Errorf("Error starting peer listener %s", err)
+		return
+	}
+
+	getPeerEndpoint := func() (*pb.PeerEndpoint, error) {
+		return &pb.PeerEndpoint{ID: &pb.PeerID{Name: "testpeer"}, Address: peerAddress}, nil
+	}
+
+	ccStartupTimeout := time.Duration(chaincodeStartupTimeoutDefault) * time.Millisecond
+	pb.RegisterChaincodeSupportServer(grpcServer, NewChaincodeSupport(DefaultChain, getPeerEndpoint, false, ccStartupTimeout, nil))
+
+	go grpcServer.Serve(lis)
+
+	var ctxt = context.Background()
+
+	// Deploy first chaincode
+	url1 := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02"
+
+	cID1 := &pb.ChaincodeID{Path: url1}
+	f := "init"
+	args := []string{"a", "100", "b", "200"}
+
+	spec1 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID1, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+
+	_, err = deploy(ctxt, spec1)
+	chaincodeID1 := spec1.ChaincodeID.Name
+	if err != nil {
+		t.Errorf("Error initializing chaincode %s(%s)", chaincodeID1, err)
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
+		closeListenerAndSleep(lis)
+		return
+	}
+
+	time.Sleep(time.Second)
+
+	// Deploy second chaincode
+	url2 := "github.com/hyperledger/fabric/examples/chaincode/go/passthru"
+
+	cID2 := &pb.ChaincodeID{Path: url2}
+	f = "init"
+	args = []string{""}
+
+	spec2 := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+
+	_, err = deploy(ctxt, spec2)
+	chaincodeID2 := spec2.ChaincodeID.Name
+
+	// From here on both chaincodes and the listener need tearing down on
+	// every exit path.  The closure reads spec2 at call time, so it stops
+	// whichever spec spec2 holds then - same as the old explicit calls.
+	cleanup := func() {
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec1})
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec2})
+		closeListenerAndSleep(lis)
+	}
+	defer cleanup()
+
+	if err != nil {
+		t.Errorf("Error initializing chaincode %s(%s)", chaincodeID2, err)
+		return
+	}
+
+	time.Sleep(time.Second)
+
+	// Invoke second chaincode, which will inturn invoke the first chaincode but pass bad params
+	f = chaincodeID1
+	args = []string{"query", "c"} //expect {"query", "a"}
+
+	spec2 = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID2, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+	// Invoke chaincode
+	_, _, _, err = invoke(ctxt, spec2, pb.Transaction_CHAINCODE_QUERY)
+
+	if err == nil {
+		// err is nil here: the bad query unexpectedly succeeded (the old
+		// code logged the nil error, which was misleading).
+		t.Errorf("Query of <%s> with bad parameters was expected to fail but succeeded", chaincodeID2)
+		return
+	}
+
+	if !strings.Contains(err.Error(), "Nil amount for c") {
+		t.Errorf("Unexpected error %s", err)
+		return
+	}
+}
+
+// Test the execution of a chaincode query that queries another chaincode
+// with security enabled: bring up membership services and a peer, register
+// the "jim" client, and run the same chaincode-to-chaincode scenario as
+// the insecure variant but under jim's secure context.
+// NOTE: this really needs to be a behave test. Remove when we have support
+// in behave for multiple chaincodes.
+func TestChaincodeQueryChaincodeWithSec(t *testing.T) {
+	viper.Set("security.enabled", "true")
+
+	//Initialize crypto
+	if err := crypto.Init(); err != nil {
+		panic(fmt.Errorf("Failed initializing the crypto layer [%s]", err))
+	}
+
+	//set paths for memberservice to pick up
+	viper.Set("peer.fileSystemPath", filepath.Join(os.TempDir(), "hyperledger", "production"))
+	viper.Set("server.rootpath", filepath.Join(os.TempDir(), "ca"))
+
+	// Start the membership services server first...
+	var err error
+	var memSrvcLis net.Listener
+	if memSrvcLis, err = initMemSrvc(); err != nil {
+		t.Fail()
+		t.Logf("Error registering user %s", err)
+		return
+	}
+
+	// ...give it time to come up...
+	time.Sleep(2 * time.Second)
+
+	// ...then start the peer.
+	var peerLis net.Listener
+	if peerLis, err = initPeer(); err != nil {
+		finitMemSrvc(memSrvcLis)
+		t.Fail()
+		t.Logf("Error registering user %s", err)
+		return
+	}
+
+	if err = crypto.RegisterClient("jim", nil, "jim", "6avZQLwcUe9b"); err != nil {
+		finitMemSrvc(memSrvcLis)
+		finitPeer(peerLis)
+		t.Fail()
+		t.Logf("Error registering user %s", err)
+		return
+	}
+
+	//login as jim and test chaincode-chaincode interaction with security
+	if err = chaincodeQueryChaincode("jim"); err != nil {
+		finitMemSrvc(memSrvcLis)
+		finitPeer(peerLis)
+		t.Fail()
+		t.Logf("Error executing test %s", err)
+		return
+	}
+
+	//cleanup
+	finitMemSrvc(memSrvcLis)
+	finitPeer(peerLis)
+}
+
+// Test a state range query: deploys the "map" example chaincode and queries
+// its "keys" function, which exercises the range query support.
+// (The previous comment, "Test the invocation of a transaction.", was a
+// copy-paste from another test.)
+func TestRangeQuery(t *testing.T) {
+	var opts []grpc.ServerOption
+	// Enable TLS on the chaincode support server when configured.
+	if viper.GetBool("peer.tls.enabled") {
+		creds, err := credentials.NewServerTLSFromFile(viper.GetString("peer.tls.cert.file"), viper.GetString("peer.tls.key.file"))
+		if err != nil {
+			grpclog.Fatalf("Failed to generate credentials %v", err)
+		}
+		opts = []grpc.ServerOption{grpc.Creds(creds)}
+	}
+	grpcServer := grpc.NewServer(opts...)
+	viper.Set("peer.fileSystemPath", "/var/hyperledger/test/tmpdb")
+
+	//use a different address than what we usually use for "peer"
+	//we override the peerAddress set in chaincode_support.go
+	peerAddress := "0.0.0.0:21212"
+
+	lis, err := net.Listen("tcp", peerAddress)
+	if err != nil {
+		t.Fail()
+		t.Logf("Error starting peer listener %s", err)
+		return
+	}
+
+	getPeerEndpoint := func() (*pb.PeerEndpoint, error) {
+		return &pb.PeerEndpoint{ID: &pb.PeerID{Name: "testpeer"}, Address: peerAddress}, nil
+	}
+
+	ccStartupTimeout := time.Duration(chaincodeStartupTimeoutDefault) * time.Millisecond
+	pb.RegisterChaincodeSupportServer(grpcServer, NewChaincodeSupport(DefaultChain, getPeerEndpoint, false, ccStartupTimeout, nil))
+
+	go grpcServer.Serve(lis)
+
+	var ctxt = context.Background()
+
+	url := "github.com/hyperledger/fabric/examples/chaincode/go/map"
+	cID := &pb.ChaincodeID{Path: url}
+
+	args := []string{}
+	f := "init"
+
+	spec := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+
+	_, err = deploy(ctxt, spec)
+	chaincodeID := spec.ChaincodeID.Name
+	if err != nil {
+		t.Fail()
+		t.Logf("Error initializing chaincode %s(%s)", chaincodeID, err)
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
+		closeListenerAndSleep(lis)
+		return
+	}
+
+	// Query the deployed chaincode's "keys" function (the range query path).
+	// The earlier comment about "invoke second chaincode" was a copy-paste
+	// from the chaincode-calling-chaincode tests; only one chaincode is used here.
+	f = "keys"
+	args = []string{}
+
+	spec = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+	_, _, _, err = invoke(ctxt, spec, pb.Transaction_CHAINCODE_QUERY)
+
+	if err != nil {
+		t.Fail()
+		t.Logf("Error invoking <%s>: %s", chaincodeID, err)
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
+		closeListenerAndSleep(lis)
+		return
+	}
+	// Cleanup: stop the chaincode container and release the listener.
+	GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
+	closeListenerAndSleep(lis)
+}
+
+// TestGetEvent deploys the eventsender example chaincode, invokes it, and
+// verifies that the invocation produced the expected chaincode event
+// (correct chaincode ID and a payload containing the args joined by commas).
+func TestGetEvent(t *testing.T) {
+	var opts []grpc.ServerOption
+	// Enable TLS on the chaincode support server when configured.
+	if viper.GetBool("peer.tls.enabled") {
+		creds, err := credentials.NewServerTLSFromFile(viper.GetString("peer.tls.cert.file"), viper.GetString("peer.tls.key.file"))
+		if err != nil {
+			grpclog.Fatalf("Failed to generate credentials %v", err)
+		}
+		opts = []grpc.ServerOption{grpc.Creds(creds)}
+	}
+	grpcServer := grpc.NewServer(opts...)
+	viper.Set("peer.fileSystemPath", "/var/hyperledger/test/tmpdb")
+
+	//use a different address than what we usually use for "peer"
+	//we override the peerAddress set in chaincode_support.go
+	peerAddress := "0.0.0.0:21212"
+
+	lis, err := net.Listen("tcp", peerAddress)
+	if err != nil {
+		t.Fail()
+		t.Logf("Error starting peer listener %s", err)
+		return
+	}
+
+	getPeerEndpoint := func() (*pb.PeerEndpoint, error) {
+		return &pb.PeerEndpoint{ID: &pb.PeerID{Name: "testpeer"}, Address: peerAddress}, nil
+	}
+
+	ccStartupTimeout := time.Duration(chaincodeStartupTimeoutDefault) * time.Millisecond
+	pb.RegisterChaincodeSupportServer(grpcServer, NewChaincodeSupport(DefaultChain, getPeerEndpoint, false, ccStartupTimeout, nil))
+
+	go grpcServer.Serve(lis)
+
+	var ctxt = context.Background()
+
+	url := "github.com/hyperledger/fabric/examples/chaincode/go/eventsender"
+
+	cID := &pb.ChaincodeID{Path: url}
+	spec := &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Function: "init", Args: []string{}}}
+
+	_, err = deploy(ctxt, spec)
+	chaincodeID := spec.ChaincodeID.Name
+	if err != nil {
+		t.Fail()
+		t.Logf("Error initializing chaincode %s(%s)", chaincodeID, err)
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
+		closeListenerAndSleep(lis)
+		return
+	}
+
+	// Let the freshly deployed container settle before invoking it.
+	time.Sleep(time.Second)
+
+	args := []string{"i", "am", "satoshi"}
+
+	spec = &pb.ChaincodeSpec{Type: 1, ChaincodeID: cID, CtorMsg: &pb.ChaincodeInput{Function: "", Args: args}}
+
+	var ccevt *pb.ChaincodeEvent
+	ccevt, _, _, err = invoke(ctxt, spec, pb.Transaction_CHAINCODE_INVOKE)
+
+	if err != nil {
+		t.Logf("Error invoking chaincode %s(%s)", chaincodeID, err)
+		t.Fail()
+	}
+
+	// BUG FIX: the original only called t.Fail() here and then fell through to
+	// dereference ccevt.ChaincodeID below, panicking the whole test binary when
+	// the event was nil. Clean up and bail out instead.
+	if ccevt == nil {
+		t.Logf("Error ccevt is nil %s(%s)", chaincodeID, err)
+		t.Fail()
+		GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
+		closeListenerAndSleep(lis)
+		return
+	}
+
+	if ccevt.ChaincodeID != chaincodeID {
+		t.Logf("Error ccevt id(%s) != cid(%s)", ccevt.ChaincodeID, chaincodeID)
+		t.Fail()
+	}
+
+	// The assertion below pins the eventsender payload format: the invoke args
+	// appear comma-joined in the event payload.
+	if strings.Index(string(ccevt.Payload), "i,am,satoshi") < 0 {
+		t.Logf("Error expected event not found (%s)", string(ccevt.Payload))
+		t.Fail()
+	}
+
+	GetChain(DefaultChain).Stop(ctxt, &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec})
+	closeListenerAndSleep(lis)
+}
+
+// TestMain loads the test configuration before running the suite and
+// propagates the suite's exit code to the process.
+func TestMain(m *testing.M) {
+	SetupTestConfig()
+	os.Exit(m.Run())
+}
diff --git a/core/chaincode/handler.go b/core/chaincode/handler.go
new file mode 100644
index 00000000000..089c898da8f
--- /dev/null
+++ b/core/chaincode/handler.go
@@ -0,0 +1,1530 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chaincode
+
+import (
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ ccintf "github.com/hyperledger/fabric/core/container/ccintf"
+ "github.com/hyperledger/fabric/core/crypto"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/util"
+ pb "github.com/hyperledger/fabric/protos"
+ "github.com/looplab/fsm"
+ "github.com/op/go-logging"
+ "golang.org/x/net/context"
+
+ "github.com/hyperledger/fabric/core/ledger"
+)
+
+// FSM state names for the peer-side chaincode handler. The inline comments
+// describe, per state: the states it is entered from ("in:"), the messages
+// received in it ("rcv:"), and the messages sent from it ("send:").
+const (
+	createdstate     = "created"     //start state
+	establishedstate = "established" //in: CREATED, rcv: REGISTER, send: REGISTERED, INIT
+	initstate        = "init"        //in:ESTABLISHED, rcv:-, send: INIT
+	readystate       = "ready"       //in:ESTABLISHED,TRANSACTION, rcv:COMPLETED
+	transactionstate = "transaction" //in:READY, rcv: xact from consensus, send: TRANSACTION
+	busyinitstate    = "busyinit"    //in:INIT, rcv: PUT_STATE, DEL_STATE, INVOKE_CHAINCODE
+	busyxactstate    = "busyxact"    //in:TRANSACTION, rcv: PUT_STATE, DEL_STATE, INVOKE_CHAINCODE
+	endstate         = "end"         //in:INIT,ESTABLISHED, rcv: error, terminate container
+
+)
+
+// Package-level logger shared by the chaincode support code.
+var chaincodeLogger = logging.MustGetLogger("chaincode")
+
+// MessageHandler interface for handling chaincode messages (common between Peer chaincode support and chaincode)
+type MessageHandler interface {
+	HandleMessage(msg *pb.ChaincodeMessage) error
+	SendMessage(msg *pb.ChaincodeMessage) error
+}
+
+// transactionContext tracks one in-flight transaction or query on a handler.
+type transactionContext struct {
+	// decrypted security context of the transaction being executed
+	transactionSecContext *pb.Transaction
+	// delivers the chaincode's COMPLETED/ERROR response to the waiting caller
+	responseNotifier chan *pb.ChaincodeMessage
+
+	// tracks open iterators used for range queries
+	rangeQueryIteratorMap map[string]statemgmt.RangeScanIterator
+}
+
+// nextStateInfo carries a message that drives the handler FSM, plus a flag
+// indicating whether the message must also be forwarded to the chaincode shim
+// once the state transition completes.
+type nextStateInfo struct {
+	msg      *pb.ChaincodeMessage
+	sendToCC bool
+}
+
+// Handler responsible for management of Peer's side of chaincode stream
+type Handler struct {
+	sync.RWMutex
+	//peer to shim grpc serializer. Used only in serialSend
+	serialLock sync.Mutex
+	ChatStream ccintf.ChaincodeStream
+	FSM        *fsm.FSM
+	ChaincodeID *pb.ChaincodeID
+
+	// A copy of decrypted deploy tx this handler manages, no code
+	deployTXSecContext *pb.Transaction
+
+	chaincodeSupport *ChaincodeSupport
+	registered       bool
+	readyNotify      chan bool
+	// Map of tx uuid to either invoke or query tx (decrypted). Each tx will be
+	// added prior to execute and removed when done executing
+	txCtxs map[string]*transactionContext
+
+	// set of uuids with a state request currently in flight (see createUUIDEntry)
+	uuidMap map[string]bool
+
+	// Track which UUIDs are queries; Although the shim maintains this, it cannot be trusted.
+	isTransaction map[string]bool
+
+	// used to do Send after making sure the state transition is complete
+	nextState chan *nextStateInfo
+}
+
+// shortuuid returns the first 8 characters of uuid for compact log output,
+// or the whole string when it is shorter than 8 characters.
+func shortuuid(uuid string) string {
+	if len(uuid) < 8 {
+		return uuid
+	}
+	return uuid[0:8]
+}
+
+// serialSend sends a message to the chaincode shim, serialized by serialLock
+// so that concurrent goroutines never interleave writes on the gRPC stream.
+func (handler *Handler) serialSend(msg *pb.ChaincodeMessage) error {
+	handler.serialLock.Lock()
+	defer handler.serialLock.Unlock()
+	if err := handler.ChatStream.Send(msg); err != nil {
+		chaincodeLogger.Errorf("Error sending %s: %s", msg.Type.String(), err)
+		return fmt.Errorf("Error sending %s: %s", msg.Type.String(), err)
+	}
+	return nil
+}
+
+// createTxContext registers a new transaction context for uuid. It fails if a
+// context for the uuid already exists (duplicate tx) or the handler has no
+// context map.
+// NOTE(review): the txCtxs nil check happens before the lock is taken — racy
+// if txCtxs can be (re)assigned concurrently; verify initialization order.
+func (handler *Handler) createTxContext(uuid string, tx *pb.Transaction) (*transactionContext, error) {
+	if handler.txCtxs == nil {
+		return nil, fmt.Errorf("cannot create notifier for Uuid:%s", uuid)
+	}
+	handler.Lock()
+	defer handler.Unlock()
+	if handler.txCtxs[uuid] != nil {
+		return nil, fmt.Errorf("Uuid:%s exists", uuid)
+	}
+	// responseNotifier is buffered (size 1) so notify() never blocks the FSM.
+	txctx := &transactionContext{transactionSecContext: tx, responseNotifier: make(chan *pb.ChaincodeMessage, 1),
+		rangeQueryIteratorMap: make(map[string]statemgmt.RangeScanIterator)}
+	handler.txCtxs[uuid] = txctx
+	return txctx, nil
+}
+
+// getTxContext returns the transaction context for uuid, or nil if absent.
+func (handler *Handler) getTxContext(uuid string) *transactionContext {
+	handler.Lock()
+	defer handler.Unlock()
+	return handler.txCtxs[uuid]
+}
+
+// deleteTxContext removes the transaction context for uuid, if any.
+func (handler *Handler) deleteTxContext(uuid string) {
+	handler.Lock()
+	defer handler.Unlock()
+	if handler.txCtxs != nil {
+		delete(handler.txCtxs, uuid)
+	}
+}
+
+// putRangeQueryIterator stores an open range-scan iterator for a query uuid so
+// RANGE_QUERY_STATE_NEXT/CLOSE requests can retrieve it later.
+func (handler *Handler) putRangeQueryIterator(txContext *transactionContext, uuid string,
+	rangeScanIterator statemgmt.RangeScanIterator) {
+	handler.Lock()
+	defer handler.Unlock()
+	txContext.rangeQueryIteratorMap[uuid] = rangeScanIterator
+}
+
+// getRangeQueryIterator returns the stored iterator for uuid, or nil if absent.
+func (handler *Handler) getRangeQueryIterator(txContext *transactionContext, uuid string) statemgmt.RangeScanIterator {
+	handler.Lock()
+	defer handler.Unlock()
+	return txContext.rangeQueryIteratorMap[uuid]
+}
+
+// deleteRangeQueryIterator drops the stored iterator for uuid (caller closes it).
+func (handler *Handler) deleteRangeQueryIterator(txContext *transactionContext, uuid string) {
+	handler.Lock()
+	defer handler.Unlock()
+	delete(txContext.rangeQueryIteratorMap, uuid)
+}
+
+//THIS CAN BE REMOVED ONCE WE SUPPORT CONFIDENTIALITY WITH CC-CALLING-CC
+//we disallow chaincode-chaincode interactions till confidentiality implications are understood
+//
+// canCallChaincode returns nil when a chaincode-to-chaincode call is allowed
+// for the tx identified by uuid, or an ERROR ChaincodeMessage explaining why
+// not. When no security helper is configured, calls are always allowed.
+func (handler *Handler) canCallChaincode(uuid string) *pb.ChaincodeMessage {
+	secHelper := handler.chaincodeSupport.getSecHelper()
+	if secHelper == nil {
+		return nil
+	}
+
+	var errMsg string
+	txctx := handler.getTxContext(uuid)
+	if txctx == nil {
+		errMsg = fmt.Sprintf("[%s]Error no context while checking for confidentiality. Sending %s", shortuuid(uuid), pb.ChaincodeMessage_ERROR)
+	} else if txctx.transactionSecContext == nil {
+		errMsg = fmt.Sprintf("[%s]Error transaction context is nil while checking for confidentiality. Sending %s", shortuuid(uuid), pb.ChaincodeMessage_ERROR)
+	} else if txctx.transactionSecContext.ConfidentialityLevel != pb.ConfidentialityLevel_PUBLIC {
+		// NOTE(review): the runtime string reads "not supported for with privacy
+		// enabled" — awkward wording left untouched here (it is program output).
+		errMsg = fmt.Sprintf("[%s]Error chaincode-chaincode interactions not supported for with privacy enabled. Sending %s", shortuuid(uuid), pb.ChaincodeMessage_ERROR)
+	}
+
+	if errMsg != "" {
+		return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: []byte(errMsg), Uuid: uuid}
+	}
+
+	//not CONFIDENTIAL transaction, OK to call CC
+	return nil
+}
+
+// encryptOrDecrypt transforms payload with the state encryptor appropriate to
+// the transaction identified by uuid. It is a pass-through when security is
+// disabled or the transaction's confidentiality level is PUBLIC. For deploy
+// transactions the deploy tx's own encryptor is used; for invoke/query the
+// encryptor is derived from the deploy tx plus the executing tx.
+func (handler *Handler) encryptOrDecrypt(encrypt bool, uuid string, payload []byte) ([]byte, error) {
+	secHelper := handler.chaincodeSupport.getSecHelper()
+	if secHelper == nil {
+		return payload, nil
+	}
+
+	txctx := handler.getTxContext(uuid)
+	if txctx == nil {
+		return nil, fmt.Errorf("[%s]No context for uuid %s", shortuuid(uuid), uuid)
+	}
+	if txctx.transactionSecContext == nil {
+		return nil, fmt.Errorf("[%s]transaction context is nil for uuid %s", shortuuid(uuid), uuid)
+	}
+	// TODO: this must be removed
+	if txctx.transactionSecContext.ConfidentialityLevel == pb.ConfidentialityLevel_PUBLIC {
+		return payload, nil
+	}
+
+	var enc crypto.StateEncryptor
+	var err error
+	if txctx.transactionSecContext.Type == pb.Transaction_CHAINCODE_DEPLOY {
+		if enc, err = secHelper.GetStateEncryptor(handler.deployTXSecContext, handler.deployTXSecContext); err != nil {
+			return nil, fmt.Errorf("error getting crypto encryptor for deploy tx :%s", err)
+		}
+	} else if txctx.transactionSecContext.Type == pb.Transaction_CHAINCODE_INVOKE || txctx.transactionSecContext.Type == pb.Transaction_CHAINCODE_QUERY {
+		if enc, err = secHelper.GetStateEncryptor(handler.deployTXSecContext, txctx.transactionSecContext); err != nil {
+			return nil, fmt.Errorf("error getting crypto encryptor %s", err)
+		}
+	} else {
+		return nil, fmt.Errorf("invalid transaction type %s", txctx.transactionSecContext.Type.String())
+	}
+	if enc == nil {
+		return nil, fmt.Errorf("secure context returns nil encryptor for tx %s", uuid)
+	}
+	// Guard the payload dumps — they are verbose and only useful when debugging.
+	if chaincodeLogger.IsEnabledFor(logging.DEBUG) {
+		chaincodeLogger.Debugf("[%s]Payload before encrypt/decrypt: %v", shortuuid(uuid), payload)
+	}
+	if encrypt {
+		payload, err = enc.Encrypt(payload)
+	} else {
+		payload, err = enc.Decrypt(payload)
+	}
+	if chaincodeLogger.IsEnabledFor(logging.DEBUG) {
+		chaincodeLogger.Debugf("[%s]Payload after encrypt/decrypt: %v", shortuuid(uuid), payload)
+	}
+
+	return payload, err
+}
+
+// decrypt is a convenience wrapper over encryptOrDecrypt for decryption.
+func (handler *Handler) decrypt(uuid string, payload []byte) ([]byte, error) {
+	return handler.encryptOrDecrypt(false, uuid, payload)
+}
+
+// encrypt is a convenience wrapper over encryptOrDecrypt for encryption.
+func (handler *Handler) encrypt(uuid string, payload []byte) ([]byte, error) {
+	return handler.encryptOrDecrypt(true, uuid, payload)
+}
+
+// getSecurityBinding returns the transaction binding from the security helper,
+// or (nil, nil) when security is disabled.
+func (handler *Handler) getSecurityBinding(tx *pb.Transaction) ([]byte, error) {
+	secHelper := handler.chaincodeSupport.getSecHelper()
+	if secHelper == nil {
+		return nil, nil
+	}
+
+	return secHelper.GetTransactionBinding(tx)
+}
+
+// deregister removes this handler from the chaincode support registry if it
+// had registered. Always returns nil.
+func (handler *Handler) deregister() error {
+	if handler.registered {
+		handler.chaincodeSupport.deregisterHandler(handler)
+	}
+	return nil
+}
+
+// triggerNextState queues msg onto the processStream loop; send controls
+// whether the message is forwarded to the chaincode after the FSM transition.
+// Blocks until processStream picks it up (nextState is unbuffered).
+func (handler *Handler) triggerNextState(msg *pb.ChaincodeMessage, send bool) {
+	handler.nextState <- &nextStateInfo{msg, send}
+}
+
+// waitForKeepaliveTimer returns a channel that fires after the keepalive
+// interval, or a channel that never fires when keepalive is disabled.
+func (handler *Handler) waitForKeepaliveTimer() <-chan time.Time {
+	if handler.chaincodeSupport.keepalive > 0 {
+		c := time.After(handler.chaincodeSupport.keepalive)
+		return c
+	}
+	//no one will signal this channel, listener blocks forever
+	c := make(chan time.Time, 1)
+	return c
+}
+
+// processStream is the receive/dispatch loop for one chaincode stream. It
+// multiplexes three event sources: messages received from the shim, internal
+// state transitions queued via triggerNextState, and the keepalive timer.
+// Returns (and deregisters the handler) on stream error, EOF, or nil message.
+func (handler *Handler) processStream() error {
+	defer handler.deregister()
+	msgAvail := make(chan *pb.ChaincodeMessage)
+	var nsInfo *nextStateInfo
+	var in *pb.ChaincodeMessage
+	var err error
+
+	//recv is used to spin Recv routine after previous received msg
+	//has been processed
+	recv := true
+	for {
+		in = nil
+		err = nil
+		nsInfo = nil
+		if recv {
+			recv = false
+			// NOTE(review): err is written by this goroutine and read in the
+			// select below. The send on msgAvail orders it for the msgAvail case,
+			// but err is also reset at the top of the loop while a Recv may still
+			// be pending — potential data race; verify with -race.
+			go func() {
+				var in2 *pb.ChaincodeMessage
+				in2, err = handler.ChatStream.Recv()
+				msgAvail <- in2
+			}()
+		}
+		select {
+		case in = <-msgAvail:
+			// Defer the deregistering of the this handler.
+			if err == io.EOF {
+				chaincodeLogger.Debugf("Received EOF, ending chaincode support stream, %s", err)
+				return err
+			} else if err != nil {
+				chaincodeLogger.Errorf("Error handling chaincode support stream: %s", err)
+				return err
+			} else if in == nil {
+				err = fmt.Errorf("Received nil message, ending chaincode support stream")
+				chaincodeLogger.Debug("Received nil message, ending chaincode support stream")
+				return err
+			}
+			chaincodeLogger.Debugf("[%s]Received message %s from shim", shortuuid(in.Uuid), in.Type.String())
+			if in.Type.String() == pb.ChaincodeMessage_ERROR.String() {
+				chaincodeLogger.Errorf("Got error: %s", string(in.Payload))
+			}
+
+			// we can spin off another Recv again
+			recv = true
+
+			if in.Type == pb.ChaincodeMessage_KEEPALIVE {
+				chaincodeLogger.Debug("Received KEEPALIVE Response")
+				// Received a keep alive message, we don't do anything with it for now
+				// and it does not touch the state machine
+				continue
+			}
+		case nsInfo = <-handler.nextState:
+			in = nsInfo.msg
+			if in == nil {
+				err = fmt.Errorf("Next state nil message, ending chaincode support stream")
+				chaincodeLogger.Debug("Next state nil message, ending chaincode support stream")
+				return err
+			}
+			chaincodeLogger.Debugf("[%s]Move state message %s", shortuuid(in.Uuid), in.Type.String())
+		case <-handler.waitForKeepaliveTimer():
+			if handler.chaincodeSupport.keepalive <= 0 {
+				chaincodeLogger.Errorf("Invalid select: keepalive not on (keepalive=%d)", handler.chaincodeSupport.keepalive)
+				continue
+			}
+
+			//TODO we could use this to hook into container lifecycle (kill the chaincode if not in use, etc)
+			kaerr := handler.serialSend(&pb.ChaincodeMessage{Type: pb.ChaincodeMessage_KEEPALIVE})
+			if kaerr != nil {
+				chaincodeLogger.Errorf("Error sending keepalive, err=%s", kaerr)
+			} else {
+				chaincodeLogger.Debug("Sent KEEPALIVE request")
+			}
+			//keepalive message kicked in. just continue
+			continue
+		}
+
+		// Drive the FSM with whichever message was selected above.
+		err = handler.HandleMessage(in)
+		if err != nil {
+			chaincodeLogger.Errorf("[%s]Error handling message, ending stream: %s", shortuuid(in.Uuid), err)
+			return fmt.Errorf("Error handling message, ending stream: %s", err)
+		}
+
+		// Forward internally triggered messages to the chaincode when requested.
+		if nsInfo != nil && nsInfo.sendToCC {
+			chaincodeLogger.Debugf("[%s]sending state message %s", shortuuid(in.Uuid), in.Type.String())
+			if err = handler.serialSend(in); err != nil {
+				chaincodeLogger.Errorf("[%s]serial sending received error %s", shortuuid(in.Uuid), err)
+				return fmt.Errorf("[%s]serial sending received error %s", shortuuid(in.Uuid), err)
+			}
+		}
+	}
+}
+
+// HandleChaincodeStream Main loop for handling the associated Chaincode stream
+// It creates a handler for the stream and blocks in its processStream loop
+// until the stream ends or errors.
+// NOTE(review): Go convention places context.Context as the first parameter.
+func HandleChaincodeStream(chaincodeSupport *ChaincodeSupport, ctxt context.Context, stream ccintf.ChaincodeStream) error {
+	deadline, ok := ctxt.Deadline()
+	chaincodeLogger.Debugf("Current context deadline = %s, ok = %v", deadline, ok)
+	handler := newChaincodeSupportHandler(chaincodeSupport, stream)
+	return handler.processStream()
+}
+
+// newChaincodeSupportHandler builds a Handler for peerChatStream and wires up
+// its FSM: the event table defines which shim messages are legal in which
+// state, and the callbacks dispatch to the before*/after*/enter* methods.
+func newChaincodeSupportHandler(chaincodeSupport *ChaincodeSupport, peerChatStream ccintf.ChaincodeStream) *Handler {
+	v := &Handler{
+		ChatStream: peerChatStream,
+	}
+	v.chaincodeSupport = chaincodeSupport
+	//we want this to block
+	v.nextState = make(chan *nextStateInfo)
+
+	v.FSM = fsm.NewFSM(
+		createdstate,
+		fsm.Events{
+			//Send REGISTERED, then, if deploy { trigger INIT(via INIT) } else { trigger READY(via COMPLETED) }
+			{Name: pb.ChaincodeMessage_REGISTER.String(), Src: []string{createdstate}, Dst: establishedstate},
+			{Name: pb.ChaincodeMessage_INIT.String(), Src: []string{establishedstate}, Dst: initstate},
+			{Name: pb.ChaincodeMessage_READY.String(), Src: []string{establishedstate}, Dst: readystate},
+			{Name: pb.ChaincodeMessage_TRANSACTION.String(), Src: []string{readystate}, Dst: transactionstate},
+			// State-mutating requests move init/transaction into the busy states.
+			{Name: pb.ChaincodeMessage_PUT_STATE.String(), Src: []string{transactionstate}, Dst: busyxactstate},
+			{Name: pb.ChaincodeMessage_DEL_STATE.String(), Src: []string{transactionstate}, Dst: busyxactstate},
+			{Name: pb.ChaincodeMessage_INVOKE_CHAINCODE.String(), Src: []string{transactionstate}, Dst: busyxactstate},
+			{Name: pb.ChaincodeMessage_PUT_STATE.String(), Src: []string{initstate}, Dst: busyinitstate},
+			{Name: pb.ChaincodeMessage_DEL_STATE.String(), Src: []string{initstate}, Dst: busyinitstate},
+			{Name: pb.ChaincodeMessage_INVOKE_CHAINCODE.String(), Src: []string{initstate}, Dst: busyinitstate},
+			{Name: pb.ChaincodeMessage_COMPLETED.String(), Src: []string{initstate, readystate, transactionstate}, Dst: readystate},
+			// Read-only requests (GET_STATE and the range query family) are
+			// self-loops: they are legal in any working state and do not change it.
+			{Name: pb.ChaincodeMessage_GET_STATE.String(), Src: []string{readystate}, Dst: readystate},
+			{Name: pb.ChaincodeMessage_GET_STATE.String(), Src: []string{initstate}, Dst: initstate},
+			{Name: pb.ChaincodeMessage_GET_STATE.String(), Src: []string{busyinitstate}, Dst: busyinitstate},
+			{Name: pb.ChaincodeMessage_GET_STATE.String(), Src: []string{transactionstate}, Dst: transactionstate},
+			{Name: pb.ChaincodeMessage_GET_STATE.String(), Src: []string{busyxactstate}, Dst: busyxactstate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE.String(), Src: []string{readystate}, Dst: readystate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE.String(), Src: []string{initstate}, Dst: initstate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE.String(), Src: []string{busyinitstate}, Dst: busyinitstate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE.String(), Src: []string{transactionstate}, Dst: transactionstate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE.String(), Src: []string{busyxactstate}, Dst: busyxactstate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE_NEXT.String(), Src: []string{readystate}, Dst: readystate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE_NEXT.String(), Src: []string{initstate}, Dst: initstate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE_NEXT.String(), Src: []string{busyinitstate}, Dst: busyinitstate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE_NEXT.String(), Src: []string{transactionstate}, Dst: transactionstate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE_NEXT.String(), Src: []string{busyxactstate}, Dst: busyxactstate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE_CLOSE.String(), Src: []string{readystate}, Dst: readystate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE_CLOSE.String(), Src: []string{initstate}, Dst: initstate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE_CLOSE.String(), Src: []string{busyinitstate}, Dst: busyinitstate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE_CLOSE.String(), Src: []string{transactionstate}, Dst: transactionstate},
+			{Name: pb.ChaincodeMessage_RANGE_QUERY_STATE_CLOSE.String(), Src: []string{busyxactstate}, Dst: busyxactstate},
+			// Errors abort init entirely but only roll back a transaction/busy state.
+			{Name: pb.ChaincodeMessage_ERROR.String(), Src: []string{initstate}, Dst: endstate},
+			{Name: pb.ChaincodeMessage_ERROR.String(), Src: []string{transactionstate}, Dst: readystate},
+			{Name: pb.ChaincodeMessage_ERROR.String(), Src: []string{busyinitstate}, Dst: initstate},
+			{Name: pb.ChaincodeMessage_ERROR.String(), Src: []string{busyxactstate}, Dst: transactionstate},
+			{Name: pb.ChaincodeMessage_RESPONSE.String(), Src: []string{busyinitstate}, Dst: initstate},
+			{Name: pb.ChaincodeMessage_RESPONSE.String(), Src: []string{busyxactstate}, Dst: transactionstate},
+		},
+		fsm.Callbacks{
+			"before_" + pb.ChaincodeMessage_REGISTER.String():               func(e *fsm.Event) { v.beforeRegisterEvent(e, v.FSM.Current()) },
+			"before_" + pb.ChaincodeMessage_COMPLETED.String():              func(e *fsm.Event) { v.beforeCompletedEvent(e, v.FSM.Current()) },
+			"before_" + pb.ChaincodeMessage_INIT.String():                   func(e *fsm.Event) { v.beforeInitState(e, v.FSM.Current()) },
+			"after_" + pb.ChaincodeMessage_GET_STATE.String():               func(e *fsm.Event) { v.afterGetState(e, v.FSM.Current()) },
+			"after_" + pb.ChaincodeMessage_RANGE_QUERY_STATE.String():       func(e *fsm.Event) { v.afterRangeQueryState(e, v.FSM.Current()) },
+			"after_" + pb.ChaincodeMessage_RANGE_QUERY_STATE_NEXT.String():  func(e *fsm.Event) { v.afterRangeQueryStateNext(e, v.FSM.Current()) },
+			"after_" + pb.ChaincodeMessage_RANGE_QUERY_STATE_CLOSE.String(): func(e *fsm.Event) { v.afterRangeQueryStateClose(e, v.FSM.Current()) },
+			"after_" + pb.ChaincodeMessage_PUT_STATE.String():               func(e *fsm.Event) { v.afterPutState(e, v.FSM.Current()) },
+			"after_" + pb.ChaincodeMessage_DEL_STATE.String():               func(e *fsm.Event) { v.afterDelState(e, v.FSM.Current()) },
+			"after_" + pb.ChaincodeMessage_INVOKE_CHAINCODE.String():        func(e *fsm.Event) { v.afterInvokeChaincode(e, v.FSM.Current()) },
+			"enter_" + establishedstate:                                     func(e *fsm.Event) { v.enterEstablishedState(e, v.FSM.Current()) },
+			"enter_" + initstate:                                            func(e *fsm.Event) { v.enterInitState(e, v.FSM.Current()) },
+			"enter_" + readystate:                                           func(e *fsm.Event) { v.enterReadyState(e, v.FSM.Current()) },
+			"enter_" + busyinitstate:                                        func(e *fsm.Event) { v.enterBusyState(e, v.FSM.Current()) },
+			"enter_" + busyxactstate:                                        func(e *fsm.Event) { v.enterBusyState(e, v.FSM.Current()) },
+			"enter_" + endstate:                                             func(e *fsm.Event) { v.enterEndState(e, v.FSM.Current()) },
+		},
+	)
+
+	return v
+}
+
+// createUUIDEntry records that a state request is in flight for uuid. Returns
+// false (request must be dropped) when an entry already exists or the map is
+// unset — only one state request per uuid may be outstanding at a time.
+// NOTE(review): the nil check precedes the lock; verify uuidMap cannot be
+// (re)assigned concurrently.
+func (handler *Handler) createUUIDEntry(uuid string) bool {
+	if handler.uuidMap == nil {
+		return false
+	}
+	handler.Lock()
+	defer handler.Unlock()
+	if handler.uuidMap[uuid] {
+		return false
+	}
+	handler.uuidMap[uuid] = true
+	return handler.uuidMap[uuid]
+}
+
+// deleteUUIDEntry clears the in-flight marker for uuid.
+// NOTE(review): the else-branch log fires when the map is nil, not when the
+// uuid is missing — the message text is misleading (left as-is: runtime string).
+func (handler *Handler) deleteUUIDEntry(uuid string) {
+	handler.Lock()
+	defer handler.Unlock()
+	if handler.uuidMap != nil {
+		delete(handler.uuidMap, uuid)
+	} else {
+		chaincodeLogger.Errorf("UUID %s not found!", uuid)
+	}
+}
+
+// markIsTransaction marks a UUID as a transaction or a query; true = transaction, false = query
+// Returns false if the tracking map has not been initialized.
+func (handler *Handler) markIsTransaction(uuid string, isTrans bool) bool {
+	handler.Lock()
+	defer handler.Unlock()
+	if handler.isTransaction == nil {
+		return false
+	}
+	handler.isTransaction[uuid] = isTrans
+	return true
+}
+
+// getIsTransaction reports whether uuid was marked as a transaction.
+// An unmarked uuid (or uninitialized map) reads as false, i.e. query.
+func (handler *Handler) getIsTransaction(uuid string) bool {
+	handler.Lock()
+	defer handler.Unlock()
+	if handler.isTransaction == nil {
+		return false
+	}
+	return handler.isTransaction[uuid]
+}
+
+// deleteIsTransaction removes the transaction/query marker for uuid.
+func (handler *Handler) deleteIsTransaction(uuid string) {
+	handler.Lock()
+	defer handler.Unlock()
+	if handler.isTransaction != nil {
+		delete(handler.isTransaction, uuid)
+	}
+}
+
+// notifyDuringStartup signals launch success/failure to whoever is waiting on
+// readyNotify. A nil channel means no one is waiting (dev/USER_RUNS_CC mode).
+func (handler *Handler) notifyDuringStartup(val bool) {
+	//if USER_RUNS_CC readyNotify will be nil
+	if handler.readyNotify != nil {
+		chaincodeLogger.Debug("Notifying during startup")
+		handler.readyNotify <- val
+	} else {
+		chaincodeLogger.Debug("nothing to notify (dev mode ?)")
+	}
+}
+
+// beforeRegisterEvent is invoked when chaincode tries to register.
+// It decodes the ChaincodeID from the REGISTER payload, registers this handler
+// with chaincode support, and acknowledges the shim with REGISTERED. Any
+// failure cancels the FSM transition and unblocks the startup waiter with false.
+func (handler *Handler) beforeRegisterEvent(e *fsm.Event, state string) {
+	chaincodeLogger.Debugf("Received %s in state %s", e.Event, state)
+	msg, ok := e.Args[0].(*pb.ChaincodeMessage)
+	if !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	chaincodeID := &pb.ChaincodeID{}
+	err := proto.Unmarshal(msg.Payload, chaincodeID)
+	if err != nil {
+		e.Cancel(fmt.Errorf("Error in received %s, could NOT unmarshal registration info: %s", pb.ChaincodeMessage_REGISTER, err))
+		return
+	}
+
+	// Now register with the chaincodeSupport
+	handler.ChaincodeID = chaincodeID
+	err = handler.chaincodeSupport.registerHandler(handler)
+	if err != nil {
+		e.Cancel(err)
+		handler.notifyDuringStartup(false)
+		return
+	}
+
+	chaincodeLogger.Debugf("Got %s for chaincodeID = %s, sending back %s", e.Event, chaincodeID, pb.ChaincodeMessage_REGISTERED)
+	if err := handler.serialSend(&pb.ChaincodeMessage{Type: pb.ChaincodeMessage_REGISTERED}); err != nil {
+		e.Cancel(fmt.Errorf("Error sending %s: %s", pb.ChaincodeMessage_REGISTERED, err))
+		handler.notifyDuringStartup(false)
+		return
+	}
+}
+
+// notify delivers a terminal response to the waiter registered for msg.Uuid
+// (via the buffered responseNotifier) and closes any range-query iterators
+// still open for that transaction.
+func (handler *Handler) notify(msg *pb.ChaincodeMessage) {
+	handler.Lock()
+	defer handler.Unlock()
+	tctx := handler.txCtxs[msg.Uuid]
+	if tctx == nil {
+		chaincodeLogger.Debugf("notifier Uuid:%s does not exist", msg.Uuid)
+	} else {
+		chaincodeLogger.Debugf("notifying Uuid:%s", msg.Uuid)
+		tctx.responseNotifier <- msg
+
+		// clean up rangeQueryIteratorMap
+		for _, v := range tctx.rangeQueryIteratorMap {
+			v.Close()
+		}
+	}
+}
+
+// beforeCompletedEvent is invoked when chaincode has completed execution of init, invoke or query.
+// It only validates the message type; the actual notification happens when the
+// FSM enters the ready state.
+func (handler *Handler) beforeCompletedEvent(e *fsm.Event, state string) {
+	msg, ok := e.Args[0].(*pb.ChaincodeMessage)
+	if !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	// Notify on channel once into READY state
+	chaincodeLogger.Debugf("[%s]beforeCompleted - not in ready state will notify when in readystate", shortuuid(msg.Uuid))
+	return
+}
+
+// beforeInitState is invoked before an init message is sent to the chaincode.
+// It unblocks the startup waiter: the chaincode is up and about to initialize.
+func (handler *Handler) beforeInitState(e *fsm.Event, state string) {
+	chaincodeLogger.Debugf("Before state %s.. notifying waiter that we are up", state)
+	handler.notifyDuringStartup(true)
+}
+
+// afterGetState handles a GET_STATE request from the chaincode.
+// It validates the message and delegates to handleGetState, which performs the
+// ledger read asynchronously.
+func (handler *Handler) afterGetState(e *fsm.Event, state string) {
+	msg, ok := e.Args[0].(*pb.ChaincodeMessage)
+	if !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	chaincodeLogger.Debugf("[%s]Received %s, invoking get state from ledger", shortuuid(msg.Uuid), pb.ChaincodeMessage_GET_STATE)
+
+	// Query ledger for state
+	handler.handleGetState(msg)
+}
+
+// Handles query to ledger to get state
+// Runs asynchronously; the eventual RESPONSE or ERROR is sent to the shim via
+// serialSend from the deferred function, after the in-flight marker is cleared.
+func (handler *Handler) handleGetState(msg *pb.ChaincodeMessage) {
+	// The defer followed by triggering a go routine dance is needed to ensure that the previous state transition
+	// is completed before the next one is triggered. The previous state transition is deemed complete only when
+	// the afterGetState function is exited. Interesting bug fix!!
+	go func() {
+		// Check if this is the unique state request from this chaincode uuid
+		uniqueReq := handler.createUUIDEntry(msg.Uuid)
+		if !uniqueReq {
+			// Drop this request
+			chaincodeLogger.Error("Another state request pending for this Uuid. Cannot process.")
+			return
+		}
+
+		var serialSendMsg *pb.ChaincodeMessage
+
+		// Every exit path below assigns serialSendMsg before returning, so the
+		// deferred send always has a message to deliver.
+		defer func() {
+			handler.deleteUUIDEntry(msg.Uuid)
+			chaincodeLogger.Debugf("[%s]handleGetState serial send %s", shortuuid(serialSendMsg.Uuid), serialSendMsg.Type)
+			handler.serialSend(serialSendMsg)
+		}()
+
+		key := string(msg.Payload)
+		ledgerObj, ledgerErr := ledger.GetLedger()
+		if ledgerErr != nil {
+			// Send error msg back to chaincode. GetState will not trigger event
+			payload := []byte(ledgerErr.Error())
+			chaincodeLogger.Errorf("Failed to get chaincode state(%s). Sending %s", ledgerErr, pb.ChaincodeMessage_ERROR)
+			// Remove uuid from current set
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		// Invoke ledger to get state
+		chaincodeID := handler.ChaincodeID.Name
+
+		// Queries read committed state; transactions read uncommitted (in-batch) state.
+		readCommittedState := !handler.getIsTransaction(msg.Uuid)
+		res, err := ledgerObj.GetState(chaincodeID, key, readCommittedState)
+		if err != nil {
+			// Send error msg back to chaincode. GetState will not trigger event
+			payload := []byte(err.Error())
+			chaincodeLogger.Errorf("[%s]Failed to get chaincode state(%s). Sending %s", shortuuid(msg.Uuid), err, pb.ChaincodeMessage_ERROR)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+		} else if res == nil {
+			//The state object being requested does not exist, so don't attempt to decrypt it
+			chaincodeLogger.Debugf("[%s]No state associated with key: %s. Sending %s with an empty payload", shortuuid(msg.Uuid), key, pb.ChaincodeMessage_RESPONSE)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: res, Uuid: msg.Uuid}
+		} else {
+			// Decrypt the data if the confidential is enabled
+			if res, err = handler.decrypt(msg.Uuid, res); err == nil {
+				// Send response msg back to chaincode. GetState will not trigger event
+				chaincodeLogger.Debugf("[%s]Got state. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_RESPONSE)
+				serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: res, Uuid: msg.Uuid}
+			} else {
+				// Send err msg back to chaincode.
+				chaincodeLogger.Errorf("[%s]Got error (%s) while decrypting. Sending %s", shortuuid(msg.Uuid), err, pb.ChaincodeMessage_ERROR)
+				errBytes := []byte(err.Error())
+				serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: errBytes, Uuid: msg.Uuid}
+			}
+
+		}
+
+	}()
+}
+
+// Maximum number of keys returned per RANGE_QUERY_STATE response batch.
+const maxRangeQueryStateLimit = 100
+
+// afterRangeQueryState handles a RANGE_QUERY_STATE request from the chaincode.
+// It validates the message and delegates to handleRangeQueryState, which runs
+// the ledger range scan asynchronously.
+func (handler *Handler) afterRangeQueryState(e *fsm.Event, state string) {
+	msg, ok := e.Args[0].(*pb.ChaincodeMessage)
+	if !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	chaincodeLogger.Debugf("Received %s, invoking get state from ledger", pb.ChaincodeMessage_RANGE_QUERY_STATE)
+
+	// Query ledger for state
+	handler.handleRangeQueryState(msg)
+	// NOTE(review): the debug text says GET_STATE but this is the
+	// RANGE_QUERY_STATE path (runtime string left untouched).
+	chaincodeLogger.Debug("Exiting GET_STATE")
+}
+
+// Handles query to ledger to range query state: opens a state range-scan
+// iterator, streams up to maxRangeQueryStateLimit decrypted key/value pairs
+// back to the chaincode shim, and leaves the iterator registered in the tx
+// context (HasMore=true) when more results remain so the shim can page with
+// RANGE_QUERY_STATE_NEXT.
+func (handler *Handler) handleRangeQueryState(msg *pb.ChaincodeMessage) {
+	// The defer followed by triggering a go routine dance is needed to ensure that the previous state transition
+	// is completed before the next one is triggered. The previous state transition is deemed complete only when
+	// the afterRangeQueryState function is exited. Interesting bug fix!!
+	go func() {
+		// Check if this is the unique state request from this chaincode uuid
+		uniqueReq := handler.createUUIDEntry(msg.Uuid)
+		if !uniqueReq {
+			// Drop this request
+			chaincodeLogger.Error("Another state request pending for this Uuid. Cannot process.")
+			return
+		}
+
+		// Response to send back to the shim; every path below must set this
+		// before returning, since the deferred send dereferences it.
+		var serialSendMsg *pb.ChaincodeMessage
+
+		defer func() {
+			handler.deleteUUIDEntry(msg.Uuid)
+			chaincodeLogger.Debugf("[%s]handleRangeQueryState serial send %s", shortuuid(serialSendMsg.Uuid), serialSendMsg.Type)
+			handler.serialSend(serialSendMsg)
+		}()
+
+		rangeQueryState := &pb.RangeQueryState{}
+		unmarshalErr := proto.Unmarshal(msg.Payload, rangeQueryState)
+		if unmarshalErr != nil {
+			payload := []byte(unmarshalErr.Error())
+			chaincodeLogger.Errorf("Failed to unmarshall range query request. Sending %s", pb.ChaincodeMessage_ERROR)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		hasNext := true
+
+		// NOTE: the local `ledger` shadows the ledger package from here on.
+		ledger, ledgerErr := ledger.GetLedger()
+		if ledgerErr != nil {
+			// Send error msg back to chaincode. GetState will not trigger event
+			payload := []byte(ledgerErr.Error())
+			chaincodeLogger.Errorf("Failed to get ledger. Sending %s", pb.ChaincodeMessage_ERROR)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		chaincodeID := handler.ChaincodeID.Name
+
+		// Queries read committed state; transactions read uncommitted state.
+		readCommittedState := !handler.getIsTransaction(msg.Uuid)
+		rangeIter, err := ledger.GetStateRangeScanIterator(chaincodeID, rangeQueryState.StartKey, rangeQueryState.EndKey, readCommittedState)
+		if err != nil {
+			// Send error msg back to chaincode. GetState will not trigger event
+			payload := []byte(err.Error())
+			chaincodeLogger.Errorf("Failed to get ledger scan iterator. Sending %s", pb.ChaincodeMessage_ERROR)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		// Register the iterator under a fresh ID so RANGE_QUERY_STATE_NEXT /
+		// _CLOSE requests can find it again.
+		iterID := util.GenerateUUID()
+		txContext := handler.getTxContext(msg.Uuid)
+		handler.putRangeQueryIterator(txContext, iterID, rangeIter)
+
+		// The iterator must be advanced before the first GetKeyValue call.
+		hasNext = rangeIter.Next()
+
+		var keysAndValues []*pb.RangeQueryStateKeyValue
+		var i = uint32(0)
+		for ; hasNext && i < maxRangeQueryStateLimit; i++ {
+			key, value := rangeIter.GetKeyValue()
+			// Decrypt the data if the confidential is enabled
+			decryptedValue, decryptErr := handler.decrypt(msg.Uuid, value)
+			if decryptErr != nil {
+				payload := []byte(decryptErr.Error())
+				chaincodeLogger.Errorf("Failed decrypt value. Sending %s", pb.ChaincodeMessage_ERROR)
+				serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+
+				// Abandon the scan: release the iterator and its registration.
+				rangeIter.Close()
+				handler.deleteRangeQueryIterator(txContext, iterID)
+
+				return
+			}
+			keyAndValue := pb.RangeQueryStateKeyValue{Key: key, Value: decryptedValue}
+			keysAndValues = append(keysAndValues, &keyAndValue)
+
+			hasNext = rangeIter.Next()
+		}
+
+		// Scan exhausted: nothing left to page, so release the iterator now.
+		if !hasNext {
+			rangeIter.Close()
+			handler.deleteRangeQueryIterator(txContext, iterID)
+		}
+
+		payload := &pb.RangeQueryStateResponse{KeysAndValues: keysAndValues, HasMore: hasNext, ID: iterID}
+		payloadBytes, err := proto.Marshal(payload)
+		if err != nil {
+			rangeIter.Close()
+			handler.deleteRangeQueryIterator(txContext, iterID)
+
+			// Send error msg back to chaincode. GetState will not trigger event
+			// NOTE(review): "resopnse" typo below is in a runtime log string;
+			// left unchanged here (comments-only pass).
+			payload := []byte(err.Error())
+			chaincodeLogger.Errorf("Failed marshall resopnse. Sending %s", pb.ChaincodeMessage_ERROR)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		chaincodeLogger.Debugf("Got keys and values. Sending %s", pb.ChaincodeMessage_RESPONSE)
+		serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: payloadBytes, Uuid: msg.Uuid}
+
+	}()
+}
+
+// afterRangeQueryStateNext handles a RANGE_QUERY_STATE_NEXT request from the
+// chaincode: it fetches the next page of results from a previously opened
+// range-scan iterator. The ledger work is delegated to
+// handleRangeQueryStateNext, which runs in its own goroutine so this FSM
+// transition can complete first.
+func (handler *Handler) afterRangeQueryStateNext(e *fsm.Event, state string) {
+	msg, ok := e.Args[0].(*pb.ChaincodeMessage)
+	if !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	// Fixed: previously logged RANGE_QUERY_STATE instead of the message type
+	// actually being handled here.
+	chaincodeLogger.Debugf("Received %s, invoking get state from ledger", pb.ChaincodeMessage_RANGE_QUERY_STATE_NEXT)
+
+	// Query ledger for state
+	handler.handleRangeQueryStateNext(msg)
+	chaincodeLogger.Debug("Exiting RANGE_QUERY_STATE_NEXT")
+}
+
+// Handles query to ledger to range query state next: looks up the iterator
+// registered by handleRangeQueryState under the client-supplied ID and
+// returns the next page of up to maxRangeQueryStateLimit decrypted
+// key/value pairs. The iterator is closed and deregistered once exhausted.
+func (handler *Handler) handleRangeQueryStateNext(msg *pb.ChaincodeMessage) {
+	// The defer followed by triggering a go routine dance is needed to ensure that the previous state transition
+	// is completed before the next one is triggered. The previous state transition is deemed complete only when
+	// the afterRangeQueryState function is exited. Interesting bug fix!!
+	go func() {
+		// Check if this is the unique state request from this chaincode uuid
+		uniqueReq := handler.createUUIDEntry(msg.Uuid)
+		if !uniqueReq {
+			// Drop this request
+			// NOTE(review): sibling handlers log this condition at Error level;
+			// this one uses Debug — probably an oversight, confirm intent.
+			chaincodeLogger.Debug("Another state request pending for this Uuid. Cannot process.")
+			return
+		}
+
+		// Response to send back to the shim; every path below must set this
+		// before returning, since the deferred send dereferences it.
+		var serialSendMsg *pb.ChaincodeMessage
+
+		defer func() {
+			handler.deleteUUIDEntry(msg.Uuid)
+			chaincodeLogger.Debugf("[%s]handleRangeQueryState serial send %s", shortuuid(serialSendMsg.Uuid), serialSendMsg.Type)
+			handler.serialSend(serialSendMsg)
+		}()
+
+		rangeQueryStateNext := &pb.RangeQueryStateNext{}
+		unmarshalErr := proto.Unmarshal(msg.Payload, rangeQueryStateNext)
+		if unmarshalErr != nil {
+			payload := []byte(unmarshalErr.Error())
+			chaincodeLogger.Errorf("Failed to unmarshall state range next query request. Sending %s", pb.ChaincodeMessage_ERROR)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		txContext := handler.getTxContext(msg.Uuid)
+		rangeIter := handler.getRangeQueryIterator(txContext, rangeQueryStateNext.ID)
+
+		if rangeIter == nil {
+			payload := []byte("Range query iterator not found")
+			chaincodeLogger.Errorf("Range query iterator not found. Sending %s", pb.ChaincodeMessage_ERROR)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		var keysAndValues []*pb.RangeQueryStateKeyValue
+		var i = uint32(0)
+		// The registered iterator is already positioned on the next unread
+		// entry (the previous page's final Next() returned true), so the loop
+		// reads first and advances afterwards.
+		hasNext := true
+		for ; hasNext && i < maxRangeQueryStateLimit; i++ {
+			key, value := rangeIter.GetKeyValue()
+			// Decrypt the data if the confidential is enabled
+			decryptedValue, decryptErr := handler.decrypt(msg.Uuid, value)
+			if decryptErr != nil {
+				payload := []byte(decryptErr.Error())
+				chaincodeLogger.Errorf("Failed decrypt value. Sending %s", pb.ChaincodeMessage_ERROR)
+				serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+
+				// Abandon the scan: release the iterator and its registration.
+				rangeIter.Close()
+				handler.deleteRangeQueryIterator(txContext, rangeQueryStateNext.ID)
+
+				return
+			}
+			keyAndValue := pb.RangeQueryStateKeyValue{Key: key, Value: decryptedValue}
+			keysAndValues = append(keysAndValues, &keyAndValue)
+
+			hasNext = rangeIter.Next()
+		}
+
+		// Scan exhausted: nothing left to page, so release the iterator now.
+		if !hasNext {
+			rangeIter.Close()
+			handler.deleteRangeQueryIterator(txContext, rangeQueryStateNext.ID)
+		}
+
+		payload := &pb.RangeQueryStateResponse{KeysAndValues: keysAndValues, HasMore: hasNext, ID: rangeQueryStateNext.ID}
+		payloadBytes, err := proto.Marshal(payload)
+		if err != nil {
+			rangeIter.Close()
+			handler.deleteRangeQueryIterator(txContext, rangeQueryStateNext.ID)
+
+			// Send error msg back to chaincode. GetState will not trigger event
+			payload := []byte(err.Error())
+			chaincodeLogger.Errorf("Failed marshall resopnse. Sending %s", pb.ChaincodeMessage_ERROR)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		chaincodeLogger.Debugf("Got keys and values. Sending %s", pb.ChaincodeMessage_RESPONSE)
+		serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: payloadBytes, Uuid: msg.Uuid}
+
+	}()
+}
+
+// afterRangeQueryStateClose handles a RANGE_QUERY_STATE_CLOSE request from
+// the chaincode: it releases a previously opened range-scan iterator. The
+// ledger work is delegated to handleRangeQueryStateClose, which runs in its
+// own goroutine so this FSM transition can complete first.
+func (handler *Handler) afterRangeQueryStateClose(e *fsm.Event, state string) {
+	msg, ok := e.Args[0].(*pb.ChaincodeMessage)
+	if !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	// Fixed: previously logged RANGE_QUERY_STATE instead of the message type
+	// actually being handled here.
+	chaincodeLogger.Debugf("Received %s, invoking get state from ledger", pb.ChaincodeMessage_RANGE_QUERY_STATE_CLOSE)
+
+	// Query ledger for state
+	handler.handleRangeQueryStateClose(msg)
+	chaincodeLogger.Debug("Exiting RANGE_QUERY_STATE_CLOSE")
+}
+
+// Handles the closing of a state iterator: closes and deregisters the
+// iterator identified by the client-supplied ID (a no-op if it is already
+// gone) and acknowledges with a RangeQueryStateResponse carrying
+// HasMore=false.
+func (handler *Handler) handleRangeQueryStateClose(msg *pb.ChaincodeMessage) {
+	// The defer followed by triggering a go routine dance is needed to ensure that the previous state transition
+	// is completed before the next one is triggered. The previous state transition is deemed complete only when
+	// the afterRangeQueryState function is exited. Interesting bug fix!!
+	go func() {
+		// Check if this is the unique state request from this chaincode uuid
+		uniqueReq := handler.createUUIDEntry(msg.Uuid)
+		if !uniqueReq {
+			// Drop this request
+			chaincodeLogger.Error("Another state request pending for this Uuid. Cannot process.")
+			return
+		}
+
+		// Response to send back to the shim; every path below must set this
+		// before returning, since the deferred send dereferences it.
+		var serialSendMsg *pb.ChaincodeMessage
+
+		defer func() {
+			handler.deleteUUIDEntry(msg.Uuid)
+			chaincodeLogger.Debugf("[%s]handleRangeQueryState serial send %s", shortuuid(serialSendMsg.Uuid), serialSendMsg.Type)
+			handler.serialSend(serialSendMsg)
+		}()
+
+		rangeQueryStateClose := &pb.RangeQueryStateClose{}
+		unmarshalErr := proto.Unmarshal(msg.Payload, rangeQueryStateClose)
+		if unmarshalErr != nil {
+			payload := []byte(unmarshalErr.Error())
+			chaincodeLogger.Errorf("Failed to unmarshall state range query close request. Sending %s", pb.ChaincodeMessage_ERROR)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		// Closing an unknown/already-closed iterator is not an error.
+		txContext := handler.getTxContext(msg.Uuid)
+		iter := handler.getRangeQueryIterator(txContext, rangeQueryStateClose.ID)
+		if iter != nil {
+			iter.Close()
+			handler.deleteRangeQueryIterator(txContext, rangeQueryStateClose.ID)
+		}
+
+		payload := &pb.RangeQueryStateResponse{HasMore: false, ID: rangeQueryStateClose.ID}
+		payloadBytes, err := proto.Marshal(payload)
+		if err != nil {
+
+			// Send error msg back to chaincode. GetState will not trigger event
+			payload := []byte(err.Error())
+			chaincodeLogger.Errorf("Failed marshall resopnse. Sending %s", pb.ChaincodeMessage_ERROR)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		chaincodeLogger.Debugf("Closed. Sending %s", pb.ChaincodeMessage_RESPONSE)
+		serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: payloadBytes, Uuid: msg.Uuid}
+
+	}()
+}
+
+// afterPutState handles a PUT_STATE request from the chaincode. It only
+// validates the event payload; the actual ledger write happens later in
+// enterBusyState.
+func (handler *Handler) afterPutState(e *fsm.Event, state string) {
+	if _, ok := e.Args[0].(*pb.ChaincodeMessage); !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	chaincodeLogger.Debugf("Received %s in state %s, invoking put state to ledger", pb.ChaincodeMessage_PUT_STATE, state)
+
+	// Put state into ledger handled within enterBusyState
+}
+
+// afterDelState handles a DEL_STATE request from the chaincode. It only
+// validates the event payload; the actual ledger delete happens later in
+// enterBusyState.
+func (handler *Handler) afterDelState(e *fsm.Event, state string) {
+	if _, ok := e.Args[0].(*pb.ChaincodeMessage); !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	chaincodeLogger.Debugf("Received %s, invoking delete state from ledger", pb.ChaincodeMessage_DEL_STATE)
+
+	// Delete state from ledger handled within enterBusyState
+}
+
+// afterInvokeChaincode handles an INVOKE_CHAINCODE request from the
+// chaincode. It only validates the event payload; the chaincode-to-chaincode
+// invocation happens later in enterBusyState.
+func (handler *Handler) afterInvokeChaincode(e *fsm.Event, state string) {
+	if _, ok := e.Args[0].(*pb.ChaincodeMessage); !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	chaincodeLogger.Debugf("Received %s in state %s, invoking another chaincode", pb.ChaincodeMessage_INVOKE_CHAINCODE, state)
+
+	// Invoke another chaincode handled within enterBusyState
+}
+
+// Handles request to ledger to put state. enterBusyState services
+// PUT_STATE, DEL_STATE and INVOKE_CHAINCODE requests from the shim; these
+// are only legal in a transaction context (not a query). The result or an
+// error is delivered back by triggering the FSM's next state transition.
+func (handler *Handler) enterBusyState(e *fsm.Event, state string) {
+	go func() {
+		msg, _ := e.Args[0].(*pb.ChaincodeMessage)
+		// First check if this UUID is a transaction; error otherwise
+		if !handler.getIsTransaction(msg.Uuid) {
+			payload := []byte(fmt.Sprintf("Cannot handle %s in query context", msg.Type.String()))
+			chaincodeLogger.Errorf("[%s]Cannot handle %s in query context. Sending %s", shortuuid(msg.Uuid), msg.Type.String(), pb.ChaincodeMessage_ERROR)
+			errMsg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			handler.triggerNextState(errMsg, true)
+			return
+		}
+
+		chaincodeLogger.Debugf("[%s]state is %s", shortuuid(msg.Uuid), state)
+		// Check if this is the unique request from this chaincode uuid
+		uniqueReq := handler.createUUIDEntry(msg.Uuid)
+		if !uniqueReq {
+			// Drop this request
+			chaincodeLogger.Error("Another request pending for this Uuid. Cannot process.")
+			return
+		}
+
+		// Message used to trigger the next FSM transition; every path below
+		// must set this before returning, since the deferred trigger
+		// dereferences it.
+		var triggerNextStateMsg *pb.ChaincodeMessage
+
+		defer func() {
+			handler.deleteUUIDEntry(msg.Uuid)
+			chaincodeLogger.Debugf("[%s]enterBusyState trigger event %s", shortuuid(triggerNextStateMsg.Uuid), triggerNextStateMsg.Type)
+			handler.triggerNextState(triggerNextStateMsg, true)
+		}()
+
+		ledgerObj, ledgerErr := ledger.GetLedger()
+		if ledgerErr != nil {
+			// Send error msg back to chaincode and trigger event
+			payload := []byte(ledgerErr.Error())
+			chaincodeLogger.Errorf("[%s]Failed to handle %s. Sending %s", shortuuid(msg.Uuid), msg.Type.String(), pb.ChaincodeMessage_ERROR)
+			triggerNextStateMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		chaincodeID := handler.ChaincodeID.Name
+		var err error
+		var res []byte
+
+		// Dispatch on the message type: PUT_STATE, DEL_STATE or
+		// INVOKE_CHAINCODE. Any other type falls through with err == nil and
+		// res == nil, producing an empty RESPONSE.
+		if msg.Type.String() == pb.ChaincodeMessage_PUT_STATE.String() {
+			putStateInfo := &pb.PutStateInfo{}
+			unmarshalErr := proto.Unmarshal(msg.Payload, putStateInfo)
+			if unmarshalErr != nil {
+				payload := []byte(unmarshalErr.Error())
+				chaincodeLogger.Errorf("[%s]Unable to decipher payload. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_ERROR)
+				triggerNextStateMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+				return
+			}
+
+			var pVal []byte
+			// Encrypt the data if the confidential is enabled
+			if pVal, err = handler.encrypt(msg.Uuid, putStateInfo.Value); err == nil {
+				// Invoke ledger to put state
+				err = ledgerObj.SetState(chaincodeID, putStateInfo.Key, pVal)
+			}
+		} else if msg.Type.String() == pb.ChaincodeMessage_DEL_STATE.String() {
+			// Invoke ledger to delete state
+			key := string(msg.Payload)
+			err = ledgerObj.DeleteState(chaincodeID, key)
+		} else if msg.Type.String() == pb.ChaincodeMessage_INVOKE_CHAINCODE.String() {
+			//check and prohibit C-call-C for CONFIDENTIAL txs
+			if triggerNextStateMsg = handler.canCallChaincode(msg.Uuid); triggerNextStateMsg != nil {
+				return
+			}
+			chaincodeSpec := &pb.ChaincodeSpec{}
+			unmarshalErr := proto.Unmarshal(msg.Payload, chaincodeSpec)
+			if unmarshalErr != nil {
+				payload := []byte(unmarshalErr.Error())
+				chaincodeLogger.Errorf("[%s]Unable to decipher payload. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_ERROR)
+				triggerNextStateMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+				return
+			}
+
+			// Get the chaincodeID to invoke
+			newChaincodeID := chaincodeSpec.ChaincodeID.Name
+
+			// Create the transaction object
+			chaincodeInvocationSpec := &pb.ChaincodeInvocationSpec{ChaincodeSpec: chaincodeSpec}
+			transaction, _ := pb.NewChaincodeExecute(chaincodeInvocationSpec, msg.Uuid, pb.Transaction_CHAINCODE_INVOKE)
+
+			// Launch the new chaincode if not already running
+			_, chaincodeInput, launchErr := handler.chaincodeSupport.Launch(context.Background(), transaction)
+			if launchErr != nil {
+				payload := []byte(launchErr.Error())
+				chaincodeLogger.Errorf("[%s]Failed to launch invoked chaincode. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_ERROR)
+				triggerNextStateMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+				return
+			}
+
+			// TODO: Need to handle timeout correctly
+			timeout := time.Duration(30000) * time.Millisecond
+
+			ccMsg, _ := createTransactionMessage(transaction.Uuid, chaincodeInput)
+
+			// Execute the chaincode
+			//NOTE: when confidential C-call-C is understood, transaction should have the correct sec context for enc/dec
+			response, execErr := handler.chaincodeSupport.Execute(context.Background(), newChaincodeID, ccMsg, timeout, transaction)
+
+			//payload is marshalled and send to the calling chaincode's shim which unmarshals and
+			//sends it to chaincode
+			res = nil
+			if execErr != nil {
+				err = execErr
+			} else {
+				res, err = proto.Marshal(response)
+			}
+		}
+
+		if err != nil {
+			// Send error msg back to chaincode and trigger event
+			payload := []byte(err.Error())
+			chaincodeLogger.Errorf("[%s]Failed to handle %s. Sending %s", shortuuid(msg.Uuid), msg.Type.String(), pb.ChaincodeMessage_ERROR)
+			triggerNextStateMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		// Send response msg back to chaincode.
+		chaincodeLogger.Debugf("[%s]Completed %s. Sending %s", shortuuid(msg.Uuid), msg.Type.String(), pb.ChaincodeMessage_RESPONSE)
+		triggerNextStateMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: res, Uuid: msg.Uuid}
+	}()
+}
+
+// enterEstablishedState is invoked when the FSM reaches the established
+// state; it unblocks the peer side waiting on the startup handshake.
+func (handler *Handler) enterEstablishedState(e *fsm.Event, state string) {
+	handler.notifyDuringStartup(true)
+}
+
+// enterInitState is invoked when the FSM first enters the init state. If the
+// triggering message is an INIT, the UUID is marked as a transaction (so
+// put/del state and chaincode invocations are permitted during init) and the
+// INIT message is forwarded to the chaincode shim; a send failure is
+// reported to the waiter via notify.
+func (handler *Handler) enterInitState(e *fsm.Event, state string) {
+	ccMsg, ok := e.Args[0].(*pb.ChaincodeMessage)
+	if !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	chaincodeLogger.Debugf("[%s]Entered state %s", shortuuid(ccMsg.Uuid), state)
+	//very first time entering init state from established, send message to chaincode
+	if ccMsg.Type == pb.ChaincodeMessage_INIT {
+		// Mark isTransaction to allow put/del state and invoke other chaincodes
+		handler.markIsTransaction(ccMsg.Uuid, true)
+		if err := handler.serialSend(ccMsg); err != nil {
+			errMsg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: []byte(fmt.Sprintf("Error sending %s: %s", pb.ChaincodeMessage_INIT, err)), Uuid: ccMsg.Uuid}
+			handler.notify(errMsg)
+		}
+	}
+}
+
+// enterReadyState is invoked when the chaincode transitions to ready
+// (transaction or query completed). Any chaincode event payload is encrypted
+// (the event type stays in the clear so the event system can filter on it),
+// the transaction/query marker for the UUID is cleared, and the waiter is
+// notified with the final message.
+func (handler *Handler) enterReadyState(e *fsm.Event, state string) {
+	// Now notify
+	msg, ok := e.Args[0].(*pb.ChaincodeMessage)
+	if !ok {
+		// Fixed: the !ok guard must run before any use of msg — previously
+		// handler.deleteIsTransaction(msg.Uuid) executed first and would
+		// dereference a nil msg when the type assertion failed.
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	//we have to encrypt chaincode event payload. We cannot encrypt event type as
+	//it is needed by the event system to filter clients by
+	// NOTE(review): this encrypts msg.Payload while the guard inspects
+	// msg.ChaincodeEvent.Payload — confirm which payload is meant.
+	if msg.ChaincodeEvent != nil && msg.ChaincodeEvent.Payload != nil {
+		var err error
+		if msg.Payload, err = handler.encrypt(msg.Uuid, msg.Payload); nil != err {
+			chaincodeLogger.Errorf("[%s]Failed to encrypt chaincode event payload", msg.Uuid)
+			msg.Payload = []byte(fmt.Sprintf("Failed to encrypt chaincode event payload %s", err.Error()))
+			msg.Type = pb.ChaincodeMessage_ERROR
+		}
+	}
+	handler.deleteIsTransaction(msg.Uuid)
+	chaincodeLogger.Debugf("[%s]Entered state %s", shortuuid(msg.Uuid), state)
+	handler.notify(msg)
+}
+
+// enterEndState is invoked when the FSM reaches its terminal state. It
+// clears the transaction/query marker, notifies the waiter, cancels the
+// event (no further transitions are valid) and deregisters the handler.
+func (handler *Handler) enterEndState(e *fsm.Event, state string) {
+	defer handler.deregister()
+	// Now notify
+	msg, ok := e.Args[0].(*pb.ChaincodeMessage)
+	if !ok {
+		// Fixed: the !ok guard must run before any use of msg — previously
+		// handler.deleteIsTransaction(msg.Uuid) executed first and would
+		// dereference a nil msg when the type assertion failed.
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	handler.deleteIsTransaction(msg.Uuid)
+	chaincodeLogger.Debugf("[%s]Entered state %s", shortuuid(msg.Uuid), state)
+	handler.notify(msg)
+	e.Cancel(fmt.Errorf("Entered end state"))
+}
+
+// cloneTx returns a deep copy of tx obtained via a marshal/unmarshal
+// round-trip through the protobuf wire format.
+func (handler *Handler) cloneTx(tx *pb.Transaction) (*pb.Transaction, error) {
+	serialized, err := proto.Marshal(tx)
+	if err != nil {
+		chaincodeLogger.Errorf("Failed marshalling transaction [%s].", err.Error())
+		return nil, err
+	}
+
+	copyTx := &pb.Transaction{}
+	if err = proto.Unmarshal(serialized, copyTx); err != nil {
+		chaincodeLogger.Errorf("Failed unmarshalling transaction [%s].", err.Error())
+		return nil, err
+	}
+
+	return copyTx, nil
+}
+
+// initializeSecContext establishes the deploy-transaction security context
+// on the handler. When depTx is non-nil it is used directly (callers pass a
+// deep clone); when nil, tx itself is the deploy transaction and is cloned.
+// The stored context is then slimmed down: the payload is dropped and the
+// ChaincodeID's Path is cleared so it matches the form used by invokes and
+// queries.
+func (handler *Handler) initializeSecContext(tx, depTx *pb.Transaction) error {
+	//set deploy transaction on the handler
+	if depTx != nil {
+		//we are given a deep clone of depTx.. Just use it
+		handler.deployTXSecContext = depTx
+	} else {
+		//nil depTx => tx is a deploy transaction, clone it
+		var err error
+		handler.deployTXSecContext, err = handler.cloneTx(tx)
+		if err != nil {
+			return fmt.Errorf("Failed to clone transaction: %s\n", err)
+		}
+	}
+
+	//don't need the payload which is not useful and rather large
+	handler.deployTXSecContext.Payload = nil
+
+	//we need to null out path from depTx as invoke or queries don't have it
+	cID := &pb.ChaincodeID{}
+	err := proto.Unmarshal(handler.deployTXSecContext.ChaincodeID, cID)
+	if err != nil {
+		return fmt.Errorf("Failed to unmarshall : %s\n", err)
+	}
+
+	cID.Path = ""
+	data, err := proto.Marshal(cID)
+	if err != nil {
+		return fmt.Errorf("Failed to marshall : %s\n", err)
+	}
+
+	// Store back the re-marshalled ChaincodeID with the path removed.
+	handler.deployTXSecContext.ChaincodeID = data
+
+	return nil
+}
+
+// setChaincodeSecurityContext populates msg.SecurityContext from tx: caller
+// certificate and signature, security binding, metadata, the ctor message
+// (for invoke/query transactions) and the transaction timestamp. A nil tx
+// leaves the (possibly freshly allocated) context empty — this is the
+// security-disabled case. Returns an error if the binding or payload cannot
+// be derived.
+func (handler *Handler) setChaincodeSecurityContext(tx *pb.Transaction, msg *pb.ChaincodeMessage) error {
+	chaincodeLogger.Debug("setting chaincode security context...")
+	if msg.SecurityContext == nil {
+		msg.SecurityContext = &pb.ChaincodeSecurityContext{}
+	}
+	if tx != nil {
+		chaincodeLogger.Debug("setting chaincode security context. Transaction different from nil")
+		chaincodeLogger.Debugf("setting chaincode security context. Metadata [% x]", tx.Metadata)
+
+		msg.SecurityContext.CallerCert = tx.Cert
+		msg.SecurityContext.CallerSign = tx.Signature
+		binding, err := handler.getSecurityBinding(tx)
+		if err != nil {
+			chaincodeLogger.Errorf("Failed getting binding [%s]", err)
+			return err
+		}
+		msg.SecurityContext.Binding = binding
+		msg.SecurityContext.Metadata = tx.Metadata
+
+		// For invokes and queries, surface the (marshalled) constructor
+		// message to the chaincode via the security context payload.
+		if tx.Type == pb.Transaction_CHAINCODE_INVOKE || tx.Type == pb.Transaction_CHAINCODE_QUERY {
+			cis := &pb.ChaincodeInvocationSpec{}
+			if err := proto.Unmarshal(tx.Payload, cis); err != nil {
+				chaincodeLogger.Errorf("Failed getting payload [%s]", err)
+				return err
+			}
+
+			ctorMsgRaw, err := proto.Marshal(cis.ChaincodeSpec.GetCtorMsg())
+			if err != nil {
+				chaincodeLogger.Errorf("Failed getting ctorMsgRaw [%s]", err)
+				return err
+			}
+
+			msg.SecurityContext.Payload = ctorMsgRaw
+		}
+		msg.SecurityContext.TxTimestamp = tx.Timestamp
+	}
+	return nil
+}
+
+//if initArgs is set (should be for "deploy" only) move to Init
+//else move to ready
+// initOrReady creates the tx context for uuid, builds either an INIT message
+// (when a function/init args are supplied — the deploy path) or a READY
+// message, attaches the security context, and triggers the corresponding FSM
+// transition. It returns the channel on which the response will be
+// delivered.
+func (handler *Handler) initOrReady(uuid string, f *string, initArgs []string, tx *pb.Transaction, depTx *pb.Transaction) (chan *pb.ChaincodeMessage, error) {
+	var ccMsg *pb.ChaincodeMessage
+	var send bool
+
+	txctx, funcErr := handler.createTxContext(uuid, tx)
+	if funcErr != nil {
+		return nil, funcErr
+	}
+
+	notfy := txctx.responseNotifier
+
+	if f != nil || initArgs != nil {
+		chaincodeLogger.Debug("sending INIT")
+		var f2 string
+		if f != nil {
+			f2 = *f
+		}
+		funcArgsMsg := &pb.ChaincodeInput{Function: f2, Args: initArgs}
+		var payload []byte
+		if payload, funcErr = proto.Marshal(funcArgsMsg); funcErr != nil {
+			handler.deleteTxContext(uuid)
+			// Fixed: ccMsg is still nil at this point, so the previous
+			// ccMsg.Type.String() would panic on a marshalling failure.
+			return nil, fmt.Errorf("Failed to marshall %s : %s\n", pb.ChaincodeMessage_INIT, funcErr)
+		}
+		ccMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_INIT, Payload: payload, Uuid: uuid}
+		send = false
+	} else {
+		chaincodeLogger.Debug("sending READY")
+		ccMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_READY, Uuid: uuid}
+		send = true
+	}
+
+	if err := handler.initializeSecContext(tx, depTx); err != nil {
+		handler.deleteTxContext(uuid)
+		return nil, err
+	}
+
+	//if security is disabled the context elements will just be nil
+	if err := handler.setChaincodeSecurityContext(tx, ccMsg); err != nil {
+		// Clean up the tx context on failure, consistent with the other
+		// error paths above.
+		handler.deleteTxContext(uuid)
+		return nil, err
+	}
+
+	handler.triggerNextState(ccMsg, send)
+
+	return notfy, nil
+}
+
+// Handles request to query another chaincode from within a chaincode
+// (C-call-C for queries). It launches the target chaincode if needed,
+// executes the query, and relays the raw marshalled response back to the
+// calling chaincode's shim without interpreting it.
+func (handler *Handler) handleQueryChaincode(msg *pb.ChaincodeMessage) {
+	go func() {
+		// Check if this is the unique request from this chaincode uuid
+		uniqueReq := handler.createUUIDEntry(msg.Uuid)
+		if !uniqueReq {
+			// Drop this request
+			chaincodeLogger.Errorf("[%s]Another request pending for this Uuid. Cannot process.", shortuuid(msg.Uuid))
+			return
+		}
+
+		// Response to send back to the shim; every path below must set this
+		// before returning.
+		var serialSendMsg *pb.ChaincodeMessage
+
+		defer func() {
+			handler.deleteUUIDEntry(msg.Uuid)
+			handler.serialSend(serialSendMsg)
+		}()
+
+		//check and prohibit C-call-C for CONFIDENTIAL txs
+		if serialSendMsg = handler.canCallChaincode(msg.Uuid); serialSendMsg != nil {
+			return
+		}
+
+		chaincodeSpec := &pb.ChaincodeSpec{}
+		unmarshalErr := proto.Unmarshal(msg.Payload, chaincodeSpec)
+		if unmarshalErr != nil {
+			payload := []byte(unmarshalErr.Error())
+			chaincodeLogger.Errorf("[%s]Unable to decipher payload. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_ERROR)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		// Get the chaincodeID to invoke
+		newChaincodeID := chaincodeSpec.ChaincodeID.Name
+
+		// Create the transaction object
+		chaincodeInvocationSpec := &pb.ChaincodeInvocationSpec{ChaincodeSpec: chaincodeSpec}
+		transaction, _ := pb.NewChaincodeExecute(chaincodeInvocationSpec, msg.Uuid, pb.Transaction_CHAINCODE_QUERY)
+
+		// Launch the new chaincode if not already running
+		_, chaincodeInput, launchErr := handler.chaincodeSupport.Launch(context.Background(), transaction)
+		if launchErr != nil {
+			payload := []byte(launchErr.Error())
+			chaincodeLogger.Errorf("[%s]Failed to launch invoked chaincode. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_ERROR)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		// TODO: Need to handle timeout correctly
+		timeout := time.Duration(30000) * time.Millisecond
+
+		ccMsg, _ := createQueryMessage(transaction.Uuid, chaincodeInput)
+
+		// Query the chaincode
+		//NOTE: when confidential C-call-C is understood, transaction should have the correct sec context for enc/dec
+		response, execErr := handler.chaincodeSupport.Execute(context.Background(), newChaincodeID, ccMsg, timeout, transaction)
+
+		if execErr != nil {
+			// Send error msg back to chaincode and trigger event
+			payload := []byte(execErr.Error())
+			chaincodeLogger.Errorf("[%s]Failed to handle %s. Sending %s", shortuuid(msg.Uuid), msg.Type.String(), pb.ChaincodeMessage_ERROR)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+			return
+		}
+
+		// Send response msg back to chaincode.
+
+		//this is need to send the payload directly to calling chaincode without
+		//interpreting (in particular, don't look for errors)
+		if respBytes, err := proto.Marshal(response); err != nil {
+			chaincodeLogger.Errorf("[%s]Error marshaling response. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_ERROR)
+			// Fixed: this branch previously read execErr.Error(), but execErr
+			// is provably nil here (the execErr != nil case returned above),
+			// so a marshal failure would panic. Report the marshal error.
+			payload := []byte(err.Error())
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+		} else {
+			chaincodeLogger.Debugf("[%s]Completed %s. Sending %s", shortuuid(msg.Uuid), msg.Type.String(), pb.ChaincodeMessage_RESPONSE)
+			serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: respBytes, Uuid: msg.Uuid}
+		}
+	}()
+}
+
+// HandleMessage implementation of MessageHandler interface. Peer's handling of Chaincode messages.
+// QUERY_COMPLETED / QUERY_ERROR / INVOKE_QUERY are handled out-of-band (they
+// carry no FSM transition); everything else is fed to the FSM, with a
+// special-case error for state-mutating requests arriving in query context.
+func (handler *Handler) HandleMessage(msg *pb.ChaincodeMessage) error {
+	chaincodeLogger.Debugf("[%s]Handling ChaincodeMessage of type: %s in state %s", shortuuid(msg.Uuid), msg.Type, handler.FSM.Current())
+
+	//QUERY_COMPLETED message can happen ONLY for Transaction_QUERY (stateless)
+	if msg.Type == pb.ChaincodeMessage_QUERY_COMPLETED {
+		chaincodeLogger.Debugf("[%s]HandleMessage- QUERY_COMPLETED. Notify", msg.Uuid)
+		handler.deleteIsTransaction(msg.Uuid)
+		var err error
+		// Encrypt the query result if confidentiality is enabled; on failure
+		// the message is downgraded to QUERY_ERROR with the error as payload.
+		if msg.Payload, err = handler.encrypt(msg.Uuid, msg.Payload); nil != err {
+			chaincodeLogger.Errorf("[%s]Failed to encrypt query result %s", msg.Uuid, string(msg.Payload))
+			msg.Payload = []byte(fmt.Sprintf("Failed to encrypt query result %s", err.Error()))
+			msg.Type = pb.ChaincodeMessage_QUERY_ERROR
+		}
+		handler.notify(msg)
+		return nil
+	} else if msg.Type == pb.ChaincodeMessage_QUERY_ERROR {
+		chaincodeLogger.Debugf("[%s]HandleMessage- QUERY_ERROR (%s). Notify", msg.Uuid, string(msg.Payload))
+		handler.deleteIsTransaction(msg.Uuid)
+		handler.notify(msg)
+		return nil
+	} else if msg.Type == pb.ChaincodeMessage_INVOKE_QUERY {
+		// Received request to query another chaincode from shim
+		chaincodeLogger.Debugf("[%s]HandleMessage- Received request to query another chaincode", msg.Uuid)
+		handler.handleQueryChaincode(msg)
+		return nil
+	}
+	if handler.FSM.Cannot(msg.Type.String()) {
+		// Check if this is a request from validator in query context
+		if msg.Type.String() == pb.ChaincodeMessage_PUT_STATE.String() || msg.Type.String() == pb.ChaincodeMessage_DEL_STATE.String() || msg.Type.String() == pb.ChaincodeMessage_INVOKE_CHAINCODE.String() {
+			// Check if this UUID is a transaction
+			if !handler.getIsTransaction(msg.Uuid) {
+				payload := []byte(fmt.Sprintf("[%s]Cannot handle %s in query context", msg.Uuid, msg.Type.String()))
+				chaincodeLogger.Errorf("[%s]Cannot handle %s in query context. Sending %s", msg.Uuid, msg.Type.String(), pb.ChaincodeMessage_ERROR)
+				errMsg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+				handler.serialSend(errMsg)
+				return fmt.Errorf("Cannot handle %s in query context", msg.Type.String())
+			}
+		}
+
+		// Other errors
+		return fmt.Errorf("[%s]Chaincode handler validator FSM cannot handle message (%s) with payload size (%d) while in state: %s", msg.Uuid, msg.Type.String(), len(msg.Payload), handler.FSM.Current())
+	}
+	// Drive the FSM; benign "no transition"/"canceled" outcomes are filtered.
+	eventErr := handler.FSM.Event(msg.Type.String(), msg)
+	filteredErr := filterError(eventErr)
+	if filteredErr != nil {
+		chaincodeLogger.Errorf("[%s]Failed to trigger FSM event %s: %s", msg.Uuid, msg.Type.String(), filteredErr)
+	}
+
+	return filteredErr
+}
+
+// Filter the Errors to allow NoTransitionError and CanceledError to not propagate for cases where embedded Err == nil
+func filterError(errFromFSMEvent error) error {
+	if errFromFSMEvent != nil {
+		if noTransitionErr, ok := errFromFSMEvent.(*fsm.NoTransitionError); ok {
+			if noTransitionErr.Err != nil {
+				// A real error is wrapped inside — propagate it.
+				// (The previous comment said "Squash", which was backwards.)
+				return errFromFSMEvent
+			}
+			// Embedded Err is nil: a benign self-transition; suppress it.
+			chaincodeLogger.Debugf("Ignoring NoTransitionError: %s", noTransitionErr)
+		}
+		if canceledErr, ok := errFromFSMEvent.(*fsm.CanceledError); ok {
+			if canceledErr.Err != nil {
+				// A real error is wrapped inside — propagate it.
+				return canceledErr
+			}
+			// Embedded Err is nil: a benign cancellation; suppress it.
+			chaincodeLogger.Debugf("Ignoring CanceledError: %s", canceledErr)
+		}
+	}
+	return nil
+}
+
+// sendExecuteMessage dispatches a TRANSACTION or QUERY message to the
+// chaincode: it creates the tx context, marks the UUID as transaction or
+// query, attaches the security context, and either triggers the FSM
+// transition (transactions) or sends the message directly to the shim
+// (queries). It returns the channel on which the response will arrive.
+func (handler *Handler) sendExecuteMessage(msg *pb.ChaincodeMessage, tx *pb.Transaction) (chan *pb.ChaincodeMessage, error) {
+	txctx, err := handler.createTxContext(msg.Uuid, tx)
+	if err != nil {
+		return nil, err
+	}
+
+	// Mark UUID as either transaction or query
+	chaincodeLogger.Debugf("[%s]Inside sendExecuteMessage. Message %s", shortuuid(msg.Uuid), msg.Type.String())
+	if msg.Type.String() == pb.ChaincodeMessage_QUERY.String() {
+		handler.markIsTransaction(msg.Uuid, false)
+	} else {
+		handler.markIsTransaction(msg.Uuid, true)
+	}
+
+	//if security is disabled the context elements will just be nil
+	if err := handler.setChaincodeSecurityContext(tx, msg); err != nil {
+		return nil, err
+	}
+
+	// Trigger FSM event if it is a transaction
+	if msg.Type.String() == pb.ChaincodeMessage_TRANSACTION.String() {
+		chaincodeLogger.Debugf("[%s]sendExecuteMsg trigger event %s", shortuuid(msg.Uuid), msg.Type)
+		handler.triggerNextState(msg, true)
+	} else {
+		// Send the message to shim
+		chaincodeLogger.Debugf("[%s]sending query", shortuuid(msg.Uuid))
+		if err = handler.serialSend(msg); err != nil {
+			// Clean up the tx context so the UUID can be reused.
+			handler.deleteTxContext(msg.Uuid)
+			return nil, fmt.Errorf("[%s]SendMessage error sending (%s)", shortuuid(msg.Uuid), err)
+		}
+	}
+
+	return txctx.responseNotifier, nil
+}
+
+// isRunning reports whether the chaincode has progressed past its startup
+// states (created/established/init) and can service requests.
+func (handler *Handler) isRunning() bool {
+	current := handler.FSM.Current()
+	return current != createdstate && current != establishedstate && current != initstate
+}
+
+/****************
+func (handler *Handler) initEvent() (chan *pb.ChaincodeMessage, error) {
+ if handler.responseNotifiers == nil {
+ return nil,fmt.Errorf("SendMessage called before registration for Uuid:%s", msg.Uuid)
+ }
+ var notfy chan *pb.ChaincodeMessage
+ handler.Lock()
+ if handler.responseNotifiers[msg.Uuid] != nil {
+ handler.Unlock()
+ return nil, fmt.Errorf("SendMessage Uuid:%s exists", msg.Uuid)
+ }
+	//note the explicit use of buffer 1. We won't block if the receiver times out and does not wait
+ //for our response
+ handler.responseNotifiers[msg.Uuid] = make(chan *pb.ChaincodeMessage, 1)
+ handler.Unlock()
+
+ if err := c.serialSend(msg); err != nil {
+ deleteNotifier(msg.Uuid)
+ return nil, fmt.Errorf("SendMessage error sending %s(%s)", msg.Uuid, err)
+ }
+ return notfy, nil
+}
+*******************/
diff --git a/core/chaincode/platforms/car/hash.go b/core/chaincode/platforms/car/hash.go
new file mode 100644
index 00000000000..2d3ff0d74d5
--- /dev/null
+++ b/core/chaincode/platforms/car/hash.go
@@ -0,0 +1,54 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package car
+
+import (
+ "encoding/hex"
+ "fmt"
+ "io/ioutil"
+
+ "github.com/hyperledger/fabric/core/util"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+//generateHashcode gets hashcode of the code under path. If path is a HTTP(s) url
+//it downloads the code first to compute the hash.
+//NOTE: for dev mode, user builds and runs chaincode manually. The name provided
+//by the user is equivalent to the path. This method will treat the name
+//as codebytes and compute the hash from it. ie, user cannot run the chaincode
+//with the same (name, ctor, args)
+func generateHashcode(spec *pb.ChaincodeSpec, path string) (string, error) {
+
+ ctor := spec.CtorMsg
+ if ctor == nil || ctor.Function == "" {
+ return "", fmt.Errorf("Cannot generate hashcode from empty ctor")
+ }
+
+ hash := util.GenerateHashFromSignature(spec.ChaincodeID.Path, ctor.Function, ctor.Args)
+
+ buf, err := ioutil.ReadFile(path)
+ if err != nil {
+ return "", fmt.Errorf("Error reading file: %s", err)
+ }
+
+ newSlice := make([]byte, len(hash)+len(buf))
+ copy(newSlice[len(buf):], hash[:])
+ //hash = md5.Sum(newSlice)
+ hash = util.ComputeCryptoHash(newSlice)
+
+ return hex.EncodeToString(hash[:]), nil
+}
diff --git a/core/chaincode/platforms/car/package.go b/core/chaincode/platforms/car/package.go
new file mode 100644
index 00000000000..017c7816308
--- /dev/null
+++ b/core/chaincode/platforms/car/package.go
@@ -0,0 +1,99 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package car
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ cutil "github.com/hyperledger/fabric/core/container/util"
+ pb "github.com/hyperledger/fabric/protos"
+ "github.com/spf13/viper"
+)
+
+func download(path string) (string, error) {
+ if strings.HasPrefix(path, "http://") {
+ // The file is remote, so we need to download it to a temporary location first
+
+ var tmp *os.File
+ var err error
+ tmp, err = ioutil.TempFile("", "car")
+ if err != nil {
+ return "", fmt.Errorf("Error creating temporary file: %s", err)
+ }
+ defer os.Remove(tmp.Name())
+ defer tmp.Close()
+
+ resp, err := http.Get(path)
+ if err != nil {
+ return "", fmt.Errorf("Error with HTTP GET: %s", err)
+ }
+ defer resp.Body.Close()
+
+ _, err = io.Copy(tmp, resp.Body)
+ if err != nil {
+ return "", fmt.Errorf("Error downloading bytes: %s", err)
+ }
+
+ return tmp.Name(), nil
+ }
+
+ return path, nil
+}
+
+// WritePackage satisfies the platform interface for generating a docker package
+// that encapsulates the environment for a CAR based chaincode
+func (carPlatform *Platform) WritePackage(spec *pb.ChaincodeSpec, tw *tar.Writer) error {
+
+ path, err := download(spec.ChaincodeID.Path)
+ if err != nil {
+ return err
+ }
+
+ spec.ChaincodeID.Name, err = generateHashcode(spec, path)
+ if err != nil {
+ return fmt.Errorf("Error generating hashcode: %s", err)
+ }
+
+ var buf []string
+
+ //let the executable's name be chaincode ID's name
+ buf = append(buf, viper.GetString("chaincode.car.Dockerfile"))
+ buf = append(buf, "COPY package.car /tmp/package.car")
+ buf = append(buf, fmt.Sprintf("RUN chaintool buildcar /tmp/package.car -o $GOPATH/bin/%s && rm /tmp/package.car", spec.ChaincodeID.Name))
+
+ dockerFileContents := strings.Join(buf, "\n")
+ dockerFileSize := int64(len([]byte(dockerFileContents)))
+
+ //Make headers identical by using zero time
+ var zeroTime time.Time
+ tw.WriteHeader(&tar.Header{Name: "Dockerfile", Size: dockerFileSize, ModTime: zeroTime, AccessTime: zeroTime, ChangeTime: zeroTime})
+ tw.Write([]byte(dockerFileContents))
+
+ err = cutil.WriteFileToPackage(path, "package.car", tw)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/core/chaincode/platforms/car/platform.go b/core/chaincode/platforms/car/platform.go
new file mode 100644
index 00000000000..8c1c4c9f75d
--- /dev/null
+++ b/core/chaincode/platforms/car/platform.go
@@ -0,0 +1,32 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package car
+
+import (
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// Platform for the CAR type
+type Platform struct {
+}
+
+// ValidateSpec validates the chaincode specification for CAR types to satisfy
+// the platform interface. This chaincode type currently doesn't
+// require anything specific so we just implicitly approve any spec
+func (carPlatform *Platform) ValidateSpec(spec *pb.ChaincodeSpec) error {
+ return nil
+}
diff --git a/core/chaincode/platforms/car/test/car_test.go b/core/chaincode/platforms/car/test/car_test.go
new file mode 100644
index 00000000000..18cf00ef938
--- /dev/null
+++ b/core/chaincode/platforms/car/test/car_test.go
@@ -0,0 +1,54 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package test
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hyperledger/fabric/core/config"
+ "github.com/hyperledger/fabric/core/container"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+func TestMain(m *testing.M) {
+ config.SetupTestConfig("../../../../../peer")
+ os.Exit(m.Run())
+}
+
+func TestCar_BuildImage(t *testing.T) {
+ vm, err := container.NewVM()
+ if err != nil {
+ t.Fail()
+ t.Logf("Error getting VM: %s", err)
+ return
+ }
+ // Build the spec
+ cwd, err := os.Getwd()
+ if err != nil {
+ t.Fail()
+ t.Logf("Error getting CWD: %s", err)
+ return
+ }
+
+ chaincodePath := cwd + "/org.hyperledger.chaincode.example02-0.1-SNAPSHOT.car"
+ spec := &pb.ChaincodeSpec{Type: pb.ChaincodeSpec_CAR, ChaincodeID: &pb.ChaincodeID{Path: chaincodePath}, CtorMsg: &pb.ChaincodeInput{Function: "f"}}
+ if _, err := vm.BuildChaincodeContainer(spec); err != nil {
+ t.Fail()
+ t.Log(err)
+ }
+}
diff --git a/core/chaincode/platforms/car/test/org.hyperledger.chaincode.example02-0.1-SNAPSHOT.car b/core/chaincode/platforms/car/test/org.hyperledger.chaincode.example02-0.1-SNAPSHOT.car
new file mode 100644
index 00000000000..ce7ca3f7dd9
Binary files /dev/null and b/core/chaincode/platforms/car/test/org.hyperledger.chaincode.example02-0.1-SNAPSHOT.car differ
diff --git a/core/chaincode/platforms/golang/hash.go b/core/chaincode/platforms/golang/hash.go
new file mode 100755
index 00000000000..d5fa1b4aec4
--- /dev/null
+++ b/core/chaincode/platforms/golang/hash.go
@@ -0,0 +1,279 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package golang
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+
+ cutil "github.com/hyperledger/fabric/core/container/util"
+ "github.com/hyperledger/fabric/core/util"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+var logger = logging.MustGetLogger("golang/hash")
+
+//core hash computation factored out for testing
+func computeHash(contents []byte, hash []byte) []byte {
+ newSlice := make([]byte, len(hash)+len(contents))
+
+ //copy the contents
+ copy(newSlice[0:len(contents)], contents[:])
+
+ //add the previous hash
+ copy(newSlice[len(contents):], hash[:])
+
+ //compute new hash
+ hash = util.ComputeCryptoHash(newSlice)
+
+ return hash
+}
+
+//hashFilesInDir computes h=hash(h,file bytes) for each file in a directory
+//Directory entries are traversed recursively. In the end a single
+//hash value is returned for the entire directory structure
+func hashFilesInDir(rootDir string, dir string, hash []byte, tw *tar.Writer) ([]byte, error) {
+ currentDir := filepath.Join(rootDir, dir)
+ logger.Debugf("hashFiles %s", currentDir)
+ //ReadDir returns sorted list of files in dir
+ fis, err := ioutil.ReadDir(currentDir)
+ if err != nil {
+ return hash, fmt.Errorf("ReadDir failed %s\n", err)
+ }
+ for _, fi := range fis {
+ name := filepath.Join(dir, fi.Name())
+ if fi.IsDir() {
+ var err error
+ hash, err = hashFilesInDir(rootDir, name, hash, tw)
+ if err != nil {
+ return hash, err
+ }
+ continue
+ }
+ fqp := filepath.Join(rootDir, name)
+ buf, err := ioutil.ReadFile(fqp)
+ if err != nil {
+ fmt.Printf("Error reading %s\n", err)
+ return hash, err
+ }
+
+ //get the new hash from file contents
+ hash = computeHash(buf, hash)
+
+ if tw != nil {
+ is := bytes.NewReader(buf)
+ if err = cutil.WriteStreamToPackage(is, fqp, filepath.Join("src", name), tw); err != nil {
+ return hash, fmt.Errorf("Error adding file to tar %s", err)
+ }
+ }
+ }
+ return hash, nil
+}
+
+func isCodeExist(tmppath string) error {
+ file, err := os.Open(tmppath)
+ if err != nil {
+ return fmt.Errorf("Download failed %s", err)
+ }
+
+ fi, err := file.Stat()
+ if err != nil {
+ return fmt.Errorf("Could not stat file %s", err)
+ }
+
+ if !fi.IsDir() {
+ return fmt.Errorf("File %s is not dir\n", file.Name())
+ }
+
+ return nil
+}
+
+func getCodeFromHTTP(path string) (codegopath string, err error) {
+ codegopath = ""
+ err = nil
+ logger.Debugf("getCodeFromHTTP %s", path)
+
+ // The following could be done with os.Getenv("GOPATH") but we need to change it later so this prepares for that next step
+ env := os.Environ()
+ var origgopath string
+ var gopathenvIndex int
+ for i, v := range env {
+ if strings.Index(v, "GOPATH=") == 0 {
+ p := strings.SplitAfter(v, "GOPATH=")
+ origgopath = p[1]
+ gopathenvIndex = i
+ break
+ }
+ }
+ if origgopath == "" {
+ err = fmt.Errorf("GOPATH not defined")
+ return
+ }
+ // Only take the first element of GOPATH
+ gopath := filepath.SplitList(origgopath)[0]
+
+ // Define a new gopath in which to download the code
+ newgopath := filepath.Join(gopath, "_usercode_")
+
+ //ignore errors.. _usercode_ might exist. TempDir will catch any other errors
+ os.Mkdir(newgopath, 0755)
+
+ if codegopath, err = ioutil.TempDir(newgopath, ""); err != nil {
+ err = fmt.Errorf("could not create tmp dir under %s(%s)", newgopath, err)
+ return
+ }
+
+ //go paths can have multiple dirs. We create a GOPATH with two source tree's as follows
+ //
+ // :
+ //
+	//This approach has several advantages:
+ // . Go will pick the first path to download user code (which we will delete after processing)
+ // . GO will not download OBC as it is in the second path. GO will use the local OBC for generating chaincode image
+ // . network savings
+ // . more secure
+ // . as we are not downloading OBC, private, password-protected OBC repo's become non-issue
+
+ env[gopathenvIndex] = "GOPATH=" + codegopath + string(os.PathListSeparator) + origgopath
+
+ // Use a 'go get' command to pull the chaincode from the given repo
+ logger.Debugf("go get %s", path)
+ cmd := exec.Command("go", "get", path)
+ cmd.Env = env
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ var errBuf bytes.Buffer
+ cmd.Stderr = &errBuf //capture Stderr and print it on error
+ err = cmd.Start()
+
+ // Create a go routine that will wait for the command to finish
+ done := make(chan error, 1)
+ go func() {
+ done <- cmd.Wait()
+ }()
+
+ select {
+ case <-time.After(time.Duration(viper.GetInt("chaincode.deploytimeout")) * time.Millisecond):
+ // If pulling repos takes too long, we should give up
+ // (This can happen if a repo is private and the git clone asks for credentials)
+ if err = cmd.Process.Kill(); err != nil {
+ err = fmt.Errorf("failed to kill: %s", err)
+ } else {
+ err = errors.New("Getting chaincode took too long")
+ }
+ case err = <-done:
+ // If we're here, the 'go get' command must have finished
+ if err != nil {
+ err = fmt.Errorf("'go get' failed with error: \"%s\"\n%s", err, string(errBuf.Bytes()))
+ }
+ }
+ return
+}
+
+func getCodeFromFS(path string) (codegopath string, err error) {
+ logger.Debugf("getCodeFromFS %s", path)
+ gopath := os.Getenv("GOPATH")
+ if gopath == "" {
+ err = fmt.Errorf("GOPATH not defined")
+ return
+ }
+ // Only take the first element of GOPATH
+ codegopath = filepath.SplitList(gopath)[0]
+
+ return
+}
+
+//generateHashcode gets hashcode of the code under path. If path is a HTTP(s) url
+//it downloads the code first to compute the hash.
+//NOTE: for dev mode, user builds and runs chaincode manually. The name provided
+//by the user is equivalent to the path. This method will treat the name
+//as codebytes and compute the hash from it. ie, user cannot run the chaincode
+//with the same (name, ctor, args)
+func generateHashcode(spec *pb.ChaincodeSpec, tw *tar.Writer) (string, error) {
+ if spec == nil {
+ return "", fmt.Errorf("Cannot generate hashcode from nil spec")
+ }
+
+ chaincodeID := spec.ChaincodeID
+ if chaincodeID == nil || chaincodeID.Path == "" {
+ return "", fmt.Errorf("Cannot generate hashcode from empty chaincode path")
+ }
+
+ ctor := spec.CtorMsg
+ if ctor == nil || ctor.Function == "" {
+ return "", fmt.Errorf("Cannot generate hashcode from empty ctor")
+ }
+
+ //code root will point to the directory where the code exists
+ //in the case of http it will be a temporary dir that
+ //will have to be deleted
+ var codegopath string
+
+ var ishttp bool
+ defer func() {
+ if ishttp && codegopath != "" {
+ os.RemoveAll(codegopath)
+ }
+ }()
+
+ path := chaincodeID.Path
+
+ var err error
+ var actualcodepath string
+ if strings.HasPrefix(path, "http://") {
+ ishttp = true
+ actualcodepath = path[7:]
+ codegopath, err = getCodeFromHTTP(actualcodepath)
+ } else if strings.HasPrefix(path, "https://") {
+ ishttp = true
+ actualcodepath = path[8:]
+ codegopath, err = getCodeFromHTTP(actualcodepath)
+ } else {
+ actualcodepath = path
+ codegopath, err = getCodeFromFS(path)
+ }
+
+ if err != nil {
+ return "", fmt.Errorf("Error getting code %s", err)
+ }
+
+ tmppath := filepath.Join(codegopath, "src", actualcodepath)
+ if err = isCodeExist(tmppath); err != nil {
+ return "", fmt.Errorf("code does not exist %s", err)
+ }
+
+ hash := util.GenerateHashFromSignature(actualcodepath, ctor.Function, ctor.Args)
+
+ hash, err = hashFilesInDir(filepath.Join(codegopath, "src"), actualcodepath, hash, tw)
+ if err != nil {
+ return "", fmt.Errorf("Could not get hashcode for %s - %s\n", path, err)
+ }
+
+ return hex.EncodeToString(hash[:]), nil
+}
diff --git a/core/chaincode/platforms/golang/hash_test.go b/core/chaincode/platforms/golang/hash_test.go
new file mode 100644
index 00000000000..0c49157944c
--- /dev/null
+++ b/core/chaincode/platforms/golang/hash_test.go
@@ -0,0 +1,159 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package golang
+
+import (
+ "bytes"
+ "encoding/hex"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/hyperledger/fabric/core/util"
+)
+
+// TestHashContentChange changes a random byte in a content and checks for hash change
+func TestHashContentChange(t *testing.T) {
+ b := []byte("firstcontent")
+ hash := util.ComputeCryptoHash(b)
+
+ b2 := []byte("To be, or not to be- that is the question: Whether 'tis nobler in the mind to suffer The slings and arrows of outrageous fortune Or to take arms against a sea of troubles, And by opposing end them. To die- to sleep- No more; and by a sleep to say we end The heartache, and the thousand natural shocks That flesh is heir to. 'Tis a consummation Devoutly to be wish'd.")
+
+ h1 := computeHash(b2, hash)
+
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ randIndex := (int(r.Uint32())) % len(b2)
+
+ randByte := byte((int(r.Uint32())) % 128)
+
+ //make sure the two bytes are different
+ for {
+ if randByte != b2[randIndex] {
+ break
+ }
+
+ randByte = byte((int(r.Uint32())) % 128)
+ }
+
+ //change a random byte
+ b2[randIndex] = randByte
+
+ //this is the core hash func under test
+ h2 := computeHash(b2, hash)
+
+ //the two hashes should be different
+ if bytes.Compare(h1, h2) == 0 {
+ t.Fail()
+ t.Logf("Hash expected to be different but is same")
+ }
+}
+
+// TestHashLenChange changes a random length of a content and checks for hash change
+func TestHashLenChange(t *testing.T) {
+ b := []byte("firstcontent")
+ hash := util.ComputeCryptoHash(b)
+
+ b2 := []byte("To be, or not to be-")
+
+ h1 := computeHash(b2, hash)
+
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ randIndex := (int(r.Uint32())) % len(b2)
+
+ b2 = b2[0:randIndex]
+
+ h2 := computeHash(b2, hash)
+
+ //hash should be different
+ if bytes.Compare(h1, h2) == 0 {
+ t.Fail()
+ t.Logf("Hash expected to be different but is same")
+ }
+}
+
+// TestHashOrderChange changes a order of hash computation over a list of lines and checks for hash change
+func TestHashOrderChange(t *testing.T) {
+ b := []byte("firstcontent")
+ hash := util.ComputeCryptoHash(b)
+
+ b2 := [][]byte{[]byte("To be, or not to be- that is the question:"),
+ []byte("Whether 'tis nobler in the mind to suffer"),
+ []byte("The slings and arrows of outrageous fortune"),
+ []byte("Or to take arms against a sea of troubles,"),
+ []byte("And by opposing end them."),
+ []byte("To die- to sleep- No more; and by a sleep to say we end"),
+ []byte("The heartache, and the thousand natural shocks"),
+ []byte("That flesh is heir to."),
+ []byte("'Tis a consummation Devoutly to be wish'd.")}
+ h1 := hash
+
+ for _, l := range b2 {
+ h1 = computeHash(l, h1)
+ }
+
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ randIndex1 := (int(r.Uint32())) % len(b2)
+ randIndex2 := (int(r.Uint32())) % len(b2)
+
+	//make sure the two indices are different
+ for {
+ if randIndex2 != randIndex1 {
+ break
+ }
+
+ randIndex2 = (int(r.Uint32())) % len(b2)
+ }
+
+ //switch two arbitrary lines
+ tmp := b2[randIndex2]
+ b2[randIndex2] = b2[randIndex1]
+ b2[randIndex1] = tmp
+
+ h2 := hash
+ for _, l := range b2 {
+		h2 = computeHash(l, h2)
+ }
+
+ //hash should be different
+ if bytes.Compare(h1, h2) == 0 {
+ t.Fail()
+ t.Logf("Hash expected to be different but is same")
+ }
+}
+
+// TestHashOverFiles computes hash over a directory and ensures it matches precomputed, hardcoded, hash
+func TestHashOverFiles(t *testing.T) {
+ b := []byte("firstcontent")
+ hash := util.ComputeCryptoHash(b)
+
+ hash, err := hashFilesInDir(".", "hashtestfiles", hash, nil)
+
+ if err != nil {
+ t.Fail()
+ t.Logf("error : %s", err)
+ }
+
+ //as long as no files under "hashtestfiles" are changed, hash should always compute to the following
+ expectedHash := "a4fe18bebf3d7e1c030c042903bdda9019b33829d03d9b95ab1edc8957be70dee6d786ab27b207210d29b5d9f88456ff753b8da5c244458cdcca6eb3c28a17ce"
+
+ computedHash := hex.EncodeToString(hash[:])
+
+ if expectedHash != computedHash {
+ t.Fail()
+ t.Logf("Hash expected to be unchanged")
+ }
+}
diff --git a/core/chaincode/platforms/golang/hashtestfiles/a.txt b/core/chaincode/platforms/golang/hashtestfiles/a.txt
new file mode 100644
index 00000000000..78981922613
--- /dev/null
+++ b/core/chaincode/platforms/golang/hashtestfiles/a.txt
@@ -0,0 +1 @@
+a
diff --git a/core/chaincode/platforms/golang/hashtestfiles/a/a1.txt b/core/chaincode/platforms/golang/hashtestfiles/a/a1.txt
new file mode 100644
index 00000000000..da0f8ed91a8
--- /dev/null
+++ b/core/chaincode/platforms/golang/hashtestfiles/a/a1.txt
@@ -0,0 +1 @@
+a1
diff --git a/core/chaincode/platforms/golang/hashtestfiles/a/a2.txt b/core/chaincode/platforms/golang/hashtestfiles/a/a2.txt
new file mode 100644
index 00000000000..c1827f07e11
--- /dev/null
+++ b/core/chaincode/platforms/golang/hashtestfiles/a/a2.txt
@@ -0,0 +1 @@
+a2
diff --git a/core/chaincode/platforms/golang/hashtestfiles/b.txt b/core/chaincode/platforms/golang/hashtestfiles/b.txt
new file mode 100644
index 00000000000..61780798228
--- /dev/null
+++ b/core/chaincode/platforms/golang/hashtestfiles/b.txt
@@ -0,0 +1 @@
+b
diff --git a/core/chaincode/platforms/golang/hashtestfiles/b/c.txt b/core/chaincode/platforms/golang/hashtestfiles/b/c.txt
new file mode 100644
index 00000000000..f2ad6c76f01
--- /dev/null
+++ b/core/chaincode/platforms/golang/hashtestfiles/b/c.txt
@@ -0,0 +1 @@
+c
diff --git a/core/chaincode/platforms/golang/hashtestfiles/b/c/c1.txt b/core/chaincode/platforms/golang/hashtestfiles/b/c/c1.txt
new file mode 100644
index 00000000000..ae9304576a6
--- /dev/null
+++ b/core/chaincode/platforms/golang/hashtestfiles/b/c/c1.txt
@@ -0,0 +1 @@
+c1
diff --git a/core/chaincode/platforms/golang/package.go b/core/chaincode/platforms/golang/package.go
new file mode 100644
index 00000000000..3754d288f35
--- /dev/null
+++ b/core/chaincode/platforms/golang/package.go
@@ -0,0 +1,84 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package golang
+
+import (
+ "archive/tar"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/spf13/viper"
+
+ cutil "github.com/hyperledger/fabric/core/container/util"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+//tw is expected to have the chaincode in it from GenerateHashcode. This method
+//will just package the rest of the bytes
+func writeChaincodePackage(spec *pb.ChaincodeSpec, tw *tar.Writer) error {
+
+ var urlLocation string
+ if strings.HasPrefix(spec.ChaincodeID.Path, "http://") {
+ urlLocation = spec.ChaincodeID.Path[7:]
+ } else if strings.HasPrefix(spec.ChaincodeID.Path, "https://") {
+ urlLocation = spec.ChaincodeID.Path[8:]
+ } else {
+ urlLocation = spec.ChaincodeID.Path
+ }
+
+ if urlLocation == "" {
+ return fmt.Errorf("empty url location")
+ }
+
+ if strings.LastIndex(urlLocation, "/") == len(urlLocation)-1 {
+ urlLocation = urlLocation[:len(urlLocation)-1]
+ }
+ toks := strings.Split(urlLocation, "/")
+ if toks == nil || len(toks) == 0 {
+ return fmt.Errorf("cannot get path components from %s", urlLocation)
+ }
+
+ chaincodeGoName := toks[len(toks)-1]
+ if chaincodeGoName == "" {
+ return fmt.Errorf("could not get chaincode name from path %s", urlLocation)
+ }
+
+ //let the executable's name be chaincode ID's name
+ newRunLine := fmt.Sprintf("RUN go install %s && cp src/github.com/hyperledger/fabric/peer/core.yaml $GOPATH/bin && mv $GOPATH/bin/%s $GOPATH/bin/%s", urlLocation, chaincodeGoName, spec.ChaincodeID.Name)
+
+ //NOTE-this could have been abstracted away so we could use it for all platforms in a common manner
+ //However, it would still be docker specific. Hence any such abstraction has to be done in a manner that
+//is not just language dependent but also container dependent. So lets make this change per platform for now
+ //in the interest of avoiding over-engineering without proper abstraction
+ if viper.GetBool("peer.tls.enabled") {
+ newRunLine = fmt.Sprintf("%s\nCOPY src/certs/cert.pem %s", newRunLine, viper.GetString("peer.tls.cert.file"))
+ }
+
+ dockerFileContents := fmt.Sprintf("%s\n%s", viper.GetString("chaincode.golang.Dockerfile"), newRunLine)
+ dockerFileSize := int64(len([]byte(dockerFileContents)))
+
+ //Make headers identical by using zero time
+ var zeroTime time.Time
+ tw.WriteHeader(&tar.Header{Name: "Dockerfile", Size: dockerFileSize, ModTime: zeroTime, AccessTime: zeroTime, ChangeTime: zeroTime})
+ tw.Write([]byte(dockerFileContents))
+ err := cutil.WriteGopathSrc(tw, urlLocation)
+ if err != nil {
+ return fmt.Errorf("Error writing Chaincode package contents: %s", err)
+ }
+ return nil
+}
diff --git a/core/chaincode/platforms/golang/platform.go b/core/chaincode/platforms/golang/platform.go
new file mode 100644
index 00000000000..27814f97584
--- /dev/null
+++ b/core/chaincode/platforms/golang/platform.go
@@ -0,0 +1,86 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package golang
+
+import (
+ "archive/tar"
+ "fmt"
+ "net/url"
+ "os"
+ "path/filepath"
+
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// Platform for chaincodes written in Go
+type Platform struct {
+}
+
+// Returns whether the given file or directory exists or not
+func pathExists(path string) (bool, error) {
+ _, err := os.Stat(path)
+ if err == nil {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return true, err
+}
+
+// ValidateSpec validates Go chaincodes
+func (goPlatform *Platform) ValidateSpec(spec *pb.ChaincodeSpec) error {
+ url, err := url.Parse(spec.ChaincodeID.Path)
+ if err != nil || url == nil {
+ return fmt.Errorf("invalid path: %s", err)
+ }
+
+	//we have no real good way of checking existence of remote urls except by downloading and testing
+ //which we do later anyway. But we *can* - and *should* - test for existence of local paths.
+ //Treat empty scheme as a local filesystem path
+ if url.Scheme == "" {
+ gopath := os.Getenv("GOPATH")
+ // Only take the first element of GOPATH
+ gopath = filepath.SplitList(gopath)[0]
+ pathToCheck := filepath.Join(gopath, "src", spec.ChaincodeID.Path)
+ exists, err := pathExists(pathToCheck)
+ if err != nil {
+ return fmt.Errorf("Error validating chaincode path: %s", err)
+ }
+ if !exists {
+ return fmt.Errorf("Path to chaincode does not exist: %s", spec.ChaincodeID.Path)
+ }
+ }
+ return nil
+}
+
+// WritePackage writes the Go chaincode package
+func (goPlatform *Platform) WritePackage(spec *pb.ChaincodeSpec, tw *tar.Writer) error {
+
+ var err error
+ spec.ChaincodeID.Name, err = generateHashcode(spec, tw)
+ if err != nil {
+ return err
+ }
+
+ err = writeChaincodePackage(spec, tw)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/core/chaincode/platforms/java/hash.go b/core/chaincode/platforms/java/hash.go
new file mode 100644
index 00000000000..6697598c04e
--- /dev/null
+++ b/core/chaincode/platforms/java/hash.go
@@ -0,0 +1,144 @@
+package java
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+
+ cutil "github.com/hyperledger/fabric/core/container/util"
+ "github.com/hyperledger/fabric/core/util"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+//hashFilesInDir computes h=hash(h,file bytes) for each file in a directory
+//Directory entries are traversed recursively. In the end a single
+//hash value is returned for the entire directory structure
+func hashFilesInDir(cutoff string, dir string, hash []byte, tw *tar.Writer) ([]byte, error) {
+ //ReadDir returns sorted list of files in dir
+ fis, err := ioutil.ReadDir(dir)
+ if err != nil {
+ return hash, fmt.Errorf("ReadDir failed %s\n", err)
+ }
+ for _, fi := range fis {
+ name := fmt.Sprintf("%s/%s", dir, fi.Name())
+ if fi.IsDir() {
+ var err error
+ hash, err = hashFilesInDir(cutoff, name, hash, tw)
+ if err != nil {
+ return hash, err
+ }
+ continue
+ }
+ buf, err := ioutil.ReadFile(name)
+ if err != nil {
+ fmt.Printf("Error reading %s\n", err)
+ return hash, err
+ }
+
+ newSlice := make([]byte, len(hash)+len(buf))
+ copy(newSlice[len(buf):], hash[:])
+ //hash = md5.Sum(newSlice)
+ hash = util.ComputeCryptoHash(newSlice)
+
+ if tw != nil {
+ is := bytes.NewReader(buf)
+ if err = cutil.WriteStreamToPackage(is, name, name[len(cutoff):], tw); err != nil {
+ return hash, fmt.Errorf("Error adding file to tar %s", err)
+ }
+ }
+ }
+ return hash, nil
+}
+
+func isCodeExist(tmppath string) error {
+ file, err := os.Open(tmppath)
+ if err != nil {
+		return fmt.Errorf("Download failed %s", err)
+ }
+ fi, err := file.Stat()
+ if err != nil {
+ return fmt.Errorf("could not stat file %s", err)
+ }
+ if !fi.IsDir() {
+ return fmt.Errorf("file %s is not dir\n", file.Name())
+ }
+ return nil
+}
+
+func getCodeFromHTTP(path string) (codegopath string, err error) {
+ //TODO
+ return "", nil
+}
+
+//generateHashcode gets hashcode of the code under path. If path is a HTTP(s) url
+//it downloads the code first to compute the hash.
+//NOTE: for dev mode, user builds and runs chaincode manually. The name provided
+//by the user is equivalent to the path. This method will treat the name
+//as codebytes and compute the hash from it. ie, user cannot run the chaincode
+//with the same (name, ctor, args)
+func generateHashcode(spec *pb.ChaincodeSpec, tw *tar.Writer) (string, error) {
+ if spec == nil {
+ return "", fmt.Errorf("Cannot generate hashcode from nil spec")
+ }
+
+ chaincodeID := spec.ChaincodeID
+ if chaincodeID == nil || chaincodeID.Path == "" {
+ return "", fmt.Errorf("Cannot generate hashcode from empty chaincode path")
+ }
+
+ ctor := spec.CtorMsg
+ if ctor == nil || ctor.Function == "" {
+ return "", fmt.Errorf("Cannot generate hashcode from empty ctor")
+ }
+
+ codepath := chaincodeID.Path
+
+ var ishttp bool
+ defer func() {
+ if ishttp {
+ os.RemoveAll(codepath)
+ }
+ }()
+
+ var err error
+ if strings.HasPrefix(codepath, "http://") {
+ ishttp = true
+ codepath = codepath[7:]
+ codepath, err = getCodeFromHTTP(codepath)
+ } else if strings.HasPrefix(codepath, "https://") {
+ ishttp = true
+ codepath = codepath[8:]
+ codepath, err = getCodeFromHTTP(codepath)
+ } else if !strings.HasPrefix(codepath, "/") {
+ wd := ""
+ wd, err = os.Getwd()
+ codepath = wd + "/" + codepath
+ }
+
+ if err != nil {
+ return "", fmt.Errorf("Error getting code %s", err)
+ }
+
+ if err = isCodeExist(codepath); err != nil {
+ return "", fmt.Errorf("code does not exist %s", err)
+ }
+
+ root := codepath
+ if strings.LastIndex(root, "/") == len(root)-1 {
+ root = root[:len(root)-1]
+ }
+ root = root[:strings.LastIndex(root, "/")+1]
+
+ hash := util.GenerateHashFromSignature(codepath, ctor.Function, ctor.Args)
+
+ hash, err = hashFilesInDir(root, codepath, hash, tw)
+ if err != nil {
+ return "", fmt.Errorf("Could not get hashcode for %s - %s\n", codepath, err)
+ }
+
+ return hex.EncodeToString(hash[:]), nil
+}
diff --git a/core/chaincode/platforms/java/package.go b/core/chaincode/platforms/java/package.go
new file mode 100644
index 00000000000..09e571ceb86
--- /dev/null
+++ b/core/chaincode/platforms/java/package.go
@@ -0,0 +1,57 @@
+package java
+
+import (
+ "archive/tar"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/spf13/viper"
+
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+//tw is expected to have the chaincode in it from GenerateHashcode.
+//This method will just package the Dockerfile needed to build the java
+//chaincode image into tw. Returns an error if the path is empty or if
+//writing to the tar stream fails.
+func writeChaincodePackage(spec *pb.ChaincodeSpec, tw *tar.Writer) error {
+
+	var urlLocation string
+	if strings.HasPrefix(spec.ChaincodeID.Path, "http://") {
+		urlLocation = spec.ChaincodeID.Path[7:]
+	} else if strings.HasPrefix(spec.ChaincodeID.Path, "https://") {
+		urlLocation = spec.ChaincodeID.Path[8:]
+	} else {
+		urlLocation = spec.ChaincodeID.Path
+	}
+
+	if urlLocation == "" {
+		return fmt.Errorf("empty url location")
+	}
+
+	// Reduce urlLocation to its final path element (ignoring a trailing "/")
+	// so the COPY instruction refers to the directory name only.
+	if strings.LastIndex(urlLocation, "/") == len(urlLocation)-1 {
+		urlLocation = urlLocation[:len(urlLocation)-1]
+	}
+	urlLocation = urlLocation[strings.LastIndex(urlLocation, "/")+1:]
+
+	var newRunLine string
+	if viper.GetBool("security.enabled") {
+		//todo
+	} else {
+		newRunLine = fmt.Sprintf("COPY %s /root/\n"+
+			"RUN cd /root/ && gradle build", urlLocation)
+	}
+
+	dockerFileContents := fmt.Sprintf("%s\n%s", viper.GetString("chaincode.java.Dockerfile"), newRunLine)
+	dockerFileSize := int64(len([]byte(dockerFileContents)))
+
+	//Make headers identical by using zero time
+	var zeroTime time.Time
+	// Propagate tar errors instead of dropping them, so a truncated package
+	// cannot masquerade as success (the original ignored both return values).
+	if err := tw.WriteHeader(&tar.Header{Name: "Dockerfile", Size: dockerFileSize, ModTime: zeroTime, AccessTime: zeroTime, ChangeTime: zeroTime}); err != nil {
+		return fmt.Errorf("Error writing Dockerfile header to package: %s", err)
+	}
+	if _, err := tw.Write([]byte(dockerFileContents)); err != nil {
+		return fmt.Errorf("Error writing Dockerfile contents to package: %s", err)
+	}
+	return nil
+}
diff --git a/core/chaincode/platforms/java/platform.go b/core/chaincode/platforms/java/platform.go
new file mode 100644
index 00000000000..0d21a78cdd8
--- /dev/null
+++ b/core/chaincode/platforms/java/platform.go
@@ -0,0 +1,67 @@
+package java
+
+import (
+ "archive/tar"
+ "fmt"
+ "net/url"
+ "os"
+
+ pb "github.com/hyperledger/fabric/protos"
+ // "path/filepath"
+)
+
+// Platform for java chaincodes in java
+type Platform struct {
+}
+
+// pathExists reports whether the named file or directory exists. A non-nil
+// error is returned only for stat failures other than "does not exist".
+func pathExists(path string) (bool, error) {
+	if _, err := os.Stat(path); err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return true, err
+	}
+	return true, nil
+}
+
+//ValidateSpec validates the java chaincode specs
+func (javaPlatform *Platform) ValidateSpec(spec *pb.ChaincodeSpec) error {
+	// Only URL well-formedness is checked here. Remote URLs cannot be
+	// verified without downloading them (which packaging does later), and
+	// local-path existence checking is intentionally not performed yet.
+	parsed, err := url.Parse(spec.ChaincodeID.Path)
+	if err != nil || parsed == nil {
+		return fmt.Errorf("invalid path: %s", err)
+	}
+	return nil
+}
+
+// WritePackage writes the java chaincode package
+func (javaPlatform *Platform) WritePackage(spec *pb.ChaincodeSpec, tw *tar.Writer) error {
+
+ var err error
+ spec.ChaincodeID.Name, err = generateHashcode(spec, tw)
+ if err != nil {
+ return err
+ }
+
+ err = writeChaincodePackage(spec, tw)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/core/chaincode/platforms/java/test/java_test.go b/core/chaincode/platforms/java/test/java_test.go
new file mode 100644
index 00000000000..789542173fd
--- /dev/null
+++ b/core/chaincode/platforms/java/test/java_test.go
@@ -0,0 +1,50 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package test
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hyperledger/fabric/core/config"
+ "github.com/hyperledger/fabric/core/container"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// TestMain loads the peer test configuration (relative path to the peer
+// config directory) before running the suite.
+func TestMain(m *testing.M) {
+	config.SetupTestConfig("../../../../../peer")
+	os.Exit(m.Run())
+}
+
+// TestJava_BuildImage builds a container image for the java shim chaincode.
+// Requires a reachable Docker daemon; failures are logged rather than fatal
+// so the remaining output is visible.
+func TestJava_BuildImage(t *testing.T) {
+
+	vm, err := container.NewVM()
+	if err != nil {
+		t.Fail()
+		t.Logf("Error getting VM: %s", err)
+		return
+	}
+
+	chaincodePath := "../../../shim/java"
+	//TODO find a better way to launch example java chaincode
+	spec := &pb.ChaincodeSpec{Type: pb.ChaincodeSpec_JAVA, ChaincodeID: &pb.ChaincodeID{Path: chaincodePath}, CtorMsg: &pb.ChaincodeInput{Function: "f"}}
+	if _, err := vm.BuildChaincodeContainer(spec); err != nil {
+		t.Fail()
+		t.Log(err)
+	}
+
+}
diff --git a/core/chaincode/platforms/platforms.go b/core/chaincode/platforms/platforms.go
new file mode 100644
index 00000000000..b6033f81173
--- /dev/null
+++ b/core/chaincode/platforms/platforms.go
@@ -0,0 +1,50 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package platforms
+
+import (
+ "archive/tar"
+ "fmt"
+
+ "github.com/hyperledger/fabric/core/chaincode/platforms/car"
+ "github.com/hyperledger/fabric/core/chaincode/platforms/golang"
+ "github.com/hyperledger/fabric/core/chaincode/platforms/java"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// Platform is the interface for validating the specification and writing the
+// deployment package for a given chaincode platform (golang, car, java).
+type Platform interface {
+	ValidateSpec(spec *pb.ChaincodeSpec) error
+	WritePackage(spec *pb.ChaincodeSpec, tw *tar.Writer) error
+}
+
+// Find returns the platform implementation for the given chaincode type, or
+// an error for an unrecognized type.
+func Find(chaincodeType pb.ChaincodeSpec_Type) (Platform, error) {
+	switch chaincodeType {
+	case pb.ChaincodeSpec_GOLANG:
+		return &golang.Platform{}, nil
+	case pb.ChaincodeSpec_CAR:
+		return &car.Platform{}, nil
+	case pb.ChaincodeSpec_JAVA:
+		return &java.Platform{}, nil
+	}
+	return nil, fmt.Errorf("Unknown chaincodeType: %s", chaincodeType)
+}
diff --git a/core/chaincode/shim/chaincode.go b/core/chaincode/shim/chaincode.go
new file mode 100644
index 00000000000..1dbefaa4d13
--- /dev/null
+++ b/core/chaincode/shim/chaincode.go
@@ -0,0 +1,1011 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package shim provides APIs for the chaincode to access its state
+// variables, transaction context and call other chaincodes.
+package shim
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ gp "google/protobuf"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/chaincode/shim/crypto/attr"
+ "github.com/hyperledger/fabric/core/chaincode/shim/crypto/ecdsa"
+ "github.com/hyperledger/fabric/core/comm"
+ pb "github.com/hyperledger/fabric/protos"
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+)
+
+// Logger for the shim package.
+var chaincodeLogger = logging.MustGetLogger("shim")
+
+// Handler to shim that handles all control logic.
+var handler *Handler
+
+// Chaincode interface must be implemented by all chaincodes. The fabric runs
+// the transactions by calling these functions as specified.
+type Chaincode interface {
+	// Init is called during Deploy transaction after the container has been
+	// established, allowing the chaincode to initialize its internal data
+	Init(stub *ChaincodeStub, function string, args []string) ([]byte, error)
+
+	// Invoke is called for every Invoke transactions. The chaincode may change
+	// its state variables
+	Invoke(stub *ChaincodeStub, function string, args []string) ([]byte, error)
+
+	// Query is called for Query transactions. The chaincode may only read
+	// (but not modify) its state variables and return the result
+	Query(stub *ChaincodeStub, function string, args []string) ([]byte, error)
+}
+
+// ChaincodeStub is an object passed to chaincode for shim side handling of
+// APIs.
+type ChaincodeStub struct {
+	// UUID of the transaction currently being handled.
+	UUID string
+	// securityContext carries per-transaction security data
+	// (caller cert, metadata, binding, payload, timestamp).
+	securityContext *pb.ChaincodeSecurityContext
+	// chaincodeEvent holds the event set via SetEvent, emitted when the
+	// transaction is made part of a block.
+	chaincodeEvent *pb.ChaincodeEvent
+}
+
+// Peer address derived from command line or env var
+var peerAddress string
+
+// Start is the entry point for chaincodes bootstrap. It is not an API for
+// chaincodes. It sets up logging, reads CORE_-prefixed environment
+// configuration, connects to the local validating peer, and enters the
+// message loop (chatWithPeer) until the stream ends.
+func Start(cc Chaincode) error {
+	// If Start() is called, we assume this is a standalone chaincode and set
+	// up formatted logging.
+	format := logging.MustStringFormatter("%{time:15:04:05.000} [%{module}] %{level:.4s} : %{message}")
+	backend := logging.NewLogBackend(os.Stderr, "", 0)
+	backendFormatter := logging.NewBackendFormatter(backend, format)
+	logging.SetBackend(backendFormatter).SetLevel(logging.Level(shimLoggingLevel), "shim")
+
+	// Map env vars onto config keys, e.g. CORE_PEER_ADDRESS -> peer.address.
+	viper.SetEnvPrefix("CORE")
+	viper.AutomaticEnv()
+	replacer := strings.NewReplacer(".", "_")
+	viper.SetEnvKeyReplacer(replacer)
+
+	// The -peer.address flag takes precedence over configuration.
+	flag.StringVar(&peerAddress, "peer.address", "", "peer address")
+
+	flag.Parse()
+
+	chaincodeLogger.Debugf("Peer address: %s", getPeerAddress())
+
+	// Establish connection with validating peer
+	clientConn, err := newPeerClientConnection()
+	if err != nil {
+		chaincodeLogger.Errorf("Error trying to connect to local peer: %s", err)
+		return fmt.Errorf("Error trying to connect to local peer: %s", err)
+	}
+
+	chaincodeLogger.Debugf("os.Args returns: %s", os.Args)
+
+	chaincodeSupportClient := pb.NewChaincodeSupportClient(clientConn)
+
+	// Establish stream with validating peer
+	stream, err := chaincodeSupportClient.Register(context.Background())
+	if err != nil {
+		return fmt.Errorf("Error chatting with leader at address=%s: %s", getPeerAddress(), err)
+	}
+
+	chaincodename := viper.GetString("chaincode.id.name")
+	err = chatWithPeer(chaincodename, stream, cc)
+
+	return err
+}
+
+// StartInProc is an entry point for system chaincodes bootstrap. It is not an
+// API for chaincodes.
+//
+// env is scanned for a CORE_CHAINCODE_ID_NAME=<name> entry; recv and send
+// carry the message stream to and from the in-process peer.
+func StartInProc(env []string, args []string, cc Chaincode, recv <-chan *pb.ChaincodeMessage, send chan<- *pb.ChaincodeMessage) error {
+	logging.SetLevel(logging.DEBUG, "chaincode")
+	chaincodeLogger.Debugf("in proc %v", args)
+
+	// Extract the chaincode name from the environment. Using HasPrefix /
+	// TrimPrefix is the idiomatic (and clearer) form of the original
+	// Index==0 / SplitAfter pair; behavior is identical.
+	var chaincodename string
+	for _, v := range env {
+		if strings.HasPrefix(v, "CORE_CHAINCODE_ID_NAME=") {
+			chaincodename = strings.TrimPrefix(v, "CORE_CHAINCODE_ID_NAME=")
+			break
+		}
+	}
+	if chaincodename == "" {
+		return fmt.Errorf("Error chaincode id not provided")
+	}
+	chaincodeLogger.Debugf("starting chat with peer using name=%s", chaincodename)
+	stream := newInProcStream(recv, send)
+	return chatWithPeer(chaincodename, stream, cc)
+}
+
+// getPeerAddress returns the peer address, preferring the -peer.address flag
+// value and falling back to the peer.address configuration key. Fatal if
+// neither is set.
+func getPeerAddress() string {
+	if peerAddress != "" {
+		return peerAddress
+	}
+
+	if peerAddress = viper.GetString("peer.address"); peerAddress == "" {
+		chaincodeLogger.Fatalf("peer.address not configured, can't connect to peer")
+	}
+
+	return peerAddress
+}
+
+// newPeerClientConnection opens a gRPC client connection to the peer,
+// enabling TLS when it is enabled in configuration.
+func newPeerClientConnection() (*grpc.ClientConn, error) {
+	var peerAddress = getPeerAddress()
+	if comm.TLSEnabled() {
+		return comm.NewClientConnectionWithAddress(peerAddress, true, true, comm.InitTLSForPeer())
+	}
+	return comm.NewClientConnectionWithAddress(peerAddress, true, false, nil)
+}
+
+// chatWithPeer registers the chaincode with the peer and then runs the shim's
+// message loop: it multiplexes between messages received from the peer stream
+// and next-state messages produced by the handler's FSM, feeding each into
+// handler.handleMessage and echoing responses/keepalives back to the peer.
+// Blocks until the stream ends or an error occurs.
+func chatWithPeer(chaincodename string, stream PeerChaincodeStream, cc Chaincode) error {
+
+	// Create the shim handler responsible for all control logic
+	handler = newChaincodeHandler(stream, cc)
+
+	defer stream.CloseSend()
+	// Send the ChaincodeID during register.
+	chaincodeID := &pb.ChaincodeID{Name: chaincodename}
+	payload, err := proto.Marshal(chaincodeID)
+	if err != nil {
+		return fmt.Errorf("Error marshalling chaincodeID during chaincode registration: %s", err)
+	}
+	// Register on the stream
+	chaincodeLogger.Debugf("Registering.. sending %s", pb.ChaincodeMessage_REGISTER)
+	handler.serialSend(&pb.ChaincodeMessage{Type: pb.ChaincodeMessage_REGISTER, Payload: payload})
+	waitc := make(chan struct{})
+	go func() {
+		defer close(waitc)
+		msgAvail := make(chan *pb.ChaincodeMessage)
+		var nsInfo *nextStateInfo
+		var in *pb.ChaincodeMessage
+		// recv gates the single outstanding stream.Recv goroutine: a new one
+		// is launched only after the previous result has been consumed.
+		recv := true
+		for {
+			in = nil
+			err = nil
+			nsInfo = nil
+			if recv {
+				recv = false
+				go func() {
+					var in2 *pb.ChaincodeMessage
+					in2, err = stream.Recv()
+					msgAvail <- in2
+				}()
+			}
+			// Wait for either a peer message or an FSM next-state message.
+			select {
+			case in = <-msgAvail:
+				if err == io.EOF {
+					chaincodeLogger.Debugf("Received EOF, ending chaincode stream, %s", err)
+					return
+				} else if err != nil {
+					chaincodeLogger.Errorf("Received error from server: %s, ending chaincode stream", err)
+					return
+				} else if in == nil {
+					err = fmt.Errorf("Received nil message, ending chaincode stream")
+					chaincodeLogger.Debug("Received nil message, ending chaincode stream")
+					return
+				}
+				chaincodeLogger.Debugf("[%s]Received message %s from shim", shortuuid(in.Uuid), in.Type.String())
+				recv = true
+			case nsInfo = <-handler.nextState:
+				in = nsInfo.msg
+				if in == nil {
+					panic("nil msg")
+				}
+				chaincodeLogger.Debugf("[%s]Move state message %s", shortuuid(in.Uuid), in.Type.String())
+			}
+
+			// Call FSM.handleMessage()
+			err = handler.handleMessage(in)
+			if err != nil {
+				err = fmt.Errorf("Error handling message: %s", err)
+				return
+			}
+
+			//keepalive messages are PONGs to the fabric's PINGs
+			if (nsInfo != nil && nsInfo.sendToCC) || (in.Type == pb.ChaincodeMessage_KEEPALIVE) {
+				if in.Type == pb.ChaincodeMessage_KEEPALIVE {
+					chaincodeLogger.Debug("Sending KEEPALIVE response")
+				} else {
+					chaincodeLogger.Debugf("[%s]send state message %s", shortuuid(in.Uuid), in.Type.String())
+				}
+				if err = handler.serialSend(in); err != nil {
+					err = fmt.Errorf("Error sending %s: %s", in.Type.String(), err)
+					return
+				}
+			}
+		}
+	}()
+	// Block until the message loop goroutine exits; err was set by it.
+	<-waitc
+	return err
+}
+
+// -- init stub ---
+// init (re)initializes the stub for a new request: it records the
+// transaction UUID and the per-transaction security context.
+func (stub *ChaincodeStub) init(uuid string, secContext *pb.ChaincodeSecurityContext) {
+	stub.UUID = uuid
+	stub.securityContext = secContext
+}
+
+// --------- Security functions ----------
+//CHAINCODE SEC INTERFACE FUNCS TOBE IMPLEMENTED BY ANGELO
+
+// ------------- Call Chaincode functions ---------------
+
+// InvokeChaincode locally calls the specified chaincode `Invoke` using the
+// same transaction context; that is, chaincode calling chaincode doesn't
+// create a new transaction message. Delegates to the package-level handler;
+// stub.UUID ties the call to the current transaction.
+func (stub *ChaincodeStub) InvokeChaincode(chaincodeName string, function string, args []string) ([]byte, error) {
+	return handler.handleInvokeChaincode(chaincodeName, function, args, stub.UUID)
+}
+
+// QueryChaincode locally calls the specified chaincode `Query` using the
+// same transaction context; that is, chaincode calling chaincode doesn't
+// create a new transaction message.
+func (stub *ChaincodeStub) QueryChaincode(chaincodeName string, function string, args []string) ([]byte, error) {
+	return handler.handleQueryChaincode(chaincodeName, function, args, stub.UUID)
+}
+
+// --------- State functions ----------
+
+// GetState returns the byte array value specified by the `key`.
+// NOTE(review): getTable below treats nil bytes as "not found", so GetState
+// presumably returns (nil, nil) for a missing key — confirm in the handler.
+func (stub *ChaincodeStub) GetState(key string) ([]byte, error) {
+	return handler.handleGetState(key, stub.UUID)
+}
+
+// PutState writes the specified `value` and `key` into the ledger.
+func (stub *ChaincodeStub) PutState(key string, value []byte) error {
+	return handler.handlePutState(key, value, stub.UUID)
+}
+
+// DelState removes the specified `key` and its value from the ledger.
+func (stub *ChaincodeStub) DelState(key string) error {
+	return handler.handleDelState(key, stub.UUID)
+}
+
+//ReadCertAttribute is used to read an specific attribute from the transaction certificate, *attributeName* is passed as input parameter to this function.
+// A fresh attributes handler is built from the stub on each call.
+// Example:
+//  attrValue,error:=stub.ReadCertAttribute("position")
+func (stub *ChaincodeStub) ReadCertAttribute(attributeName string) ([]byte, error) {
+	attributesHandler, err := attr.NewAttributesHandlerImpl(stub)
+	if err != nil {
+		return nil, err
+	}
+	return attributesHandler.GetValue(attributeName)
+}
+
+//VerifyAttribute is used to verify if the transaction certificate has an attribute with name *attributeName* and value *attributeValue* which are the input parameters received by this function.
+//Example:
+//    containsAttr, error := stub.VerifyAttribute("position", "Software Engineer")
+func (stub *ChaincodeStub) VerifyAttribute(attributeName string, attributeValue []byte) (bool, error) {
+	attributesHandler, err := attr.NewAttributesHandlerImpl(stub)
+	if err != nil {
+		return false, err
+	}
+	return attributesHandler.VerifyAttribute(attributeName, attributeValue)
+}
+
+//VerifyAttributes does the same as VerifyAttribute but it checks for a list of attributes and their respective values instead of a single attribute/value pair
+// Example:
+//    containsAttrs, error:= stub.VerifyAttributes(&attr.Attribute{"position",  "Software Engineer"}, &attr.Attribute{"company", "ACompany"})
+func (stub *ChaincodeStub) VerifyAttributes(attrs ...*attr.Attribute) (bool, error) {
+	attributesHandler, err := attr.NewAttributesHandlerImpl(stub)
+	if err != nil {
+		return false, err
+	}
+	return attributesHandler.VerifyAttributes(attrs...)
+}
+
+// StateRangeQueryIterator allows a chaincode to iterate over a range of
+// key/value pairs in the state.
+type StateRangeQueryIterator struct {
+	// handler used to fetch further batches and to close the query.
+	handler *Handler
+	// uuid of the transaction that opened the query.
+	uuid string
+	// response is the current batch of keys/values from the peer.
+	response *pb.RangeQueryStateResponse
+	// currentLoc indexes the next unread entry in response.KeysAndValues.
+	currentLoc int
+}
+
+// RangeQueryState function can be invoked by a chaincode to query of a range
+// of keys in the state. Assuming the startKey and endKey are in lexical order,
+// an iterator will be returned that can be used to iterate over all keys
+// between the startKey and endKey, inclusive. The order in which keys are
+// returned by the iterator is random.
+func (stub *ChaincodeStub) RangeQueryState(startKey, endKey string) (*StateRangeQueryIterator, error) {
+	response, err := handler.handleRangeQueryState(startKey, endKey, stub.UUID)
+	if err != nil {
+		return nil, err
+	}
+	return &StateRangeQueryIterator{handler, stub.UUID, response, 0}, nil
+}
+
+// HasNext reports whether the iterator has more keys and values, either
+// already fetched in the current batch or still available on the peer.
+func (iter *StateRangeQueryIterator) HasNext() bool {
+	return iter.currentLoc < len(iter.response.KeysAndValues) || iter.response.HasMore
+}
+
+// Next returns the next key and value in the range query iterator, fetching
+// the next batch from the peer when the current one is exhausted. Returns an
+// error when no further keys are available.
+func (iter *StateRangeQueryIterator) Next() (string, []byte, error) {
+	if iter.currentLoc < len(iter.response.KeysAndValues) {
+		keyValue := iter.response.KeysAndValues[iter.currentLoc]
+		iter.currentLoc++
+		return keyValue.Key, keyValue.Value, nil
+	} else if !iter.response.HasMore {
+		return "", nil, errors.New("No such key")
+	} else {
+		response, err := iter.handler.handleRangeQueryStateNext(iter.response.ID, iter.uuid)
+
+		if err != nil {
+			return "", nil, err
+		}
+
+		iter.currentLoc = 0
+		iter.response = response
+		// Guard against an empty follow-up batch; the original indexed
+		// KeysAndValues[0] unconditionally and would panic here.
+		if len(iter.response.KeysAndValues) == 0 {
+			return "", nil, errors.New("No such key")
+		}
+		keyValue := iter.response.KeysAndValues[iter.currentLoc]
+		iter.currentLoc++
+		return keyValue.Key, keyValue.Value, nil
+
+	}
+}
+
+// Close closes the range query iterator. This should be called when done
+// reading from the iterator to free up resources on the peer.
+func (iter *StateRangeQueryIterator) Close() error {
+	_, err := iter.handler.handleRangeQueryStateClose(iter.response.ID, iter.uuid)
+	return err
+}
+
+// TABLE FUNCTIONALITY
+// TODO More comments here with documentation
+
+// Table Errors
+var (
+	// ErrTableNotFound is the sentinel returned (and compared against by
+	// callers such as CreateTable) when the specified table cannot be found.
+	ErrTableNotFound = errors.New("chaincode: Table not found")
+)
+
+// CreateTable creates a new table given the table name and column definitions.
+// The definitions must be non-empty, have unique non-empty names and valid
+// types, and include at least one key column. Fails if a table with the same
+// name already exists.
+func (stub *ChaincodeStub) CreateTable(name string, columnDefinitions []*ColumnDefinition) error {
+
+	_, err := stub.getTable(name)
+	if err == nil {
+		return fmt.Errorf("CreateTable operation failed. Table %s already exists.", name)
+	}
+	if err != ErrTableNotFound {
+		return fmt.Errorf("CreateTable operation failed. %s", err)
+	}
+
+	// len() of a nil slice is 0, so a single check suffices (the original
+	// redundantly tested nil first).
+	if len(columnDefinitions) == 0 {
+		return errors.New("Invalid column definitions. Tables must contain at least one column.")
+	}
+
+	hasKey := false
+	nameMap := make(map[string]bool)
+	for i, definition := range columnDefinitions {
+
+		// Check name
+		if definition == nil {
+			return fmt.Errorf("Column definition %d is invalid. Definition must not be nil.", i)
+		}
+		if len(definition.Name) == 0 {
+			return fmt.Errorf("Column definition %d is invalid. Name must be 1 or more characters.", i)
+		}
+		if _, exists := nameMap[definition.Name]; exists {
+			return fmt.Errorf("Invalid table. Table contains duplicate column name '%s'.", definition.Name)
+		}
+		nameMap[definition.Name] = true
+
+		// Check type
+		switch definition.Type {
+		case ColumnDefinition_STRING:
+		case ColumnDefinition_INT32:
+		case ColumnDefinition_INT64:
+		case ColumnDefinition_UINT32:
+		case ColumnDefinition_UINT64:
+		case ColumnDefinition_BYTES:
+		case ColumnDefinition_BOOL:
+		default:
+			return fmt.Errorf("Column definition %s does not have a valid type.", definition.Name)
+		}
+
+		if definition.Key {
+			hasKey = true
+		}
+	}
+
+	if !hasKey {
+		// Fixed typo in the user-facing message ("Inavlid" -> "Invalid").
+		return errors.New("Invalid table. One or more columns must be a key.")
+	}
+
+	// Persist the table definition under its derived state key.
+	table := &Table{name, columnDefinitions}
+	tableBytes, err := proto.Marshal(table)
+	if err != nil {
+		return fmt.Errorf("Error marshalling table: %s", err)
+	}
+	tableNameKey, err := getTableNameKey(name)
+	if err != nil {
+		return fmt.Errorf("Error creating table key: %s", err)
+	}
+	err = stub.PutState(tableNameKey, tableBytes)
+	if err != nil {
+		return fmt.Errorf("Error inserting table in state: %s", err)
+	}
+	return nil
+}
+
+// GetTable returns the table for the specified table name or ErrTableNotFound
+// if the table does not exist.
+func (stub *ChaincodeStub) GetTable(tableName string) (*Table, error) {
+	return stub.getTable(tableName)
+}
+
+// DeleteTable deletes an entire table and all associated rows.
+func (stub *ChaincodeStub) DeleteTable(tableName string) error {
+	tableNameKey, err := getTableNameKey(tableName)
+	if err != nil {
+		return err
+	}
+
+	// Delete rows. Row keys append "<len><value>" segments to tableNameKey
+	// (see buildKeyString); lengths start with a digit and ':' sorts just
+	// after '9' in ASCII, so the range ["1", ":") spans every row key.
+	iter, err := stub.RangeQueryState(tableNameKey+"1", tableNameKey+":")
+	if err != nil {
+		return fmt.Errorf("Error deleting table: %s", err)
+	}
+	defer iter.Close()
+	for iter.HasNext() {
+		key, _, err := iter.Next()
+		if err != nil {
+			return fmt.Errorf("Error deleting table: %s", err)
+		}
+		err = stub.DelState(key)
+		if err != nil {
+			return fmt.Errorf("Error deleting table: %s", err)
+		}
+	}
+
+	// Finally remove the table definition itself.
+	return stub.DelState(tableNameKey)
+}
+
+// InsertRow inserts a new row into the specified table.
+// Returns -
+// true and no error if the row is successfully inserted.
+// false and no error if a row already exists for the given key.
+// false and a TableNotFoundError if the specified table name does not exist.
+// false and an error if there is an unexpected error condition.
+func (stub *ChaincodeStub) InsertRow(tableName string, row Row) (bool, error) {
+	return stub.insertRowInternal(tableName, row, false)
+}
+
+// ReplaceRow updates the row in the specified table.
+// Returns -
+// true and no error if the row is successfully updated.
+// false and no error if a row does not exist for the given key.
+// false and a TableNotFoundError if the specified table name does not exist.
+// false and an error if there is an unexpected error condition.
+func (stub *ChaincodeStub) ReplaceRow(tableName string, row Row) (bool, error) {
+	return stub.insertRowInternal(tableName, row, true)
+}
+
+// GetRow fetches a row from the specified table for the given key.
+// NOTE(review): when no row exists, GetState presumably returns nil bytes and
+// proto.Unmarshal of nil yields a zero-value Row with nil error, so callers
+// see an empty Row rather than an error — confirm against the handler before
+// relying on this.
+func (stub *ChaincodeStub) GetRow(tableName string, key []Column) (Row, error) {
+
+	var row Row
+
+	keyString, err := buildKeyString(tableName, key)
+	if err != nil {
+		return row, err
+	}
+
+	rowBytes, err := stub.GetState(keyString)
+	if err != nil {
+		return row, fmt.Errorf("Error fetching row from DB: %s", err)
+	}
+
+	err = proto.Unmarshal(rowBytes, &row)
+	if err != nil {
+		return row, fmt.Errorf("Error unmarshalling row: %s", err)
+	}
+
+	return row, nil
+
+}
+
+// GetRows returns multiple rows based on a partial key. For example, given table
+// | A | B | C | D |
+// where A, C and D are keys, GetRows can be called with [A, C] to return
+// all rows that have A, C and any value for D as their key. GetRows could
+// also be called with A only to return all rows that have A and any value
+// for C and D as their key.
+//
+// Rows are delivered on the returned channel, which is closed when iteration
+// finishes (or an error is hit mid-stream).
+func (stub *ChaincodeStub) GetRows(tableName string, key []Column) (<-chan Row, error) {
+
+	keyString, err := buildKeyString(tableName, key)
+	if err != nil {
+		return nil, err
+	}
+
+	table, err := stub.getTable(tableName)
+	if err != nil {
+		return nil, err
+	}
+
+	// Need to check for special case where table has a single column
+	if len(table.GetColumnDefinitions()) < 2 && len(key) > 0 {
+
+		row, err := stub.GetRow(tableName, key)
+		if err != nil {
+			return nil, err
+		}
+		rows := make(chan Row)
+		go func() {
+			rows <- row
+			close(rows)
+		}()
+		return rows, nil
+	}
+
+	iter, err := stub.RangeQueryState(keyString+"1", keyString+":")
+	if err != nil {
+		return nil, fmt.Errorf("Error fetching rows: %s", err)
+	}
+
+	rows := make(chan Row)
+
+	go func() {
+		// Close the iterator only when this goroutine is done with it; the
+		// original deferred Close in the outer function raced with this loop.
+		defer iter.Close()
+		defer close(rows)
+		for iter.HasNext() {
+			_, rowBytes, err := iter.Next()
+			if err != nil {
+				// Stop on error. The original closed the channel here without
+				// returning, then sent on the closed channel (panic) and kept
+				// looping.
+				return
+			}
+
+			var row Row
+			if err = proto.Unmarshal(rowBytes, &row); err != nil {
+				return
+			}
+
+			rows <- row
+		}
+	}()
+
+	return rows, nil
+
+}
+
+// DeleteRow deletes the row for the given key from the specified table.
+func (stub *ChaincodeStub) DeleteRow(tableName string, key []Column) error {
+	keyString, err := buildKeyString(tableName, key)
+	if err != nil {
+		return err
+	}
+	if err = stub.DelState(keyString); err != nil {
+		return fmt.Errorf("DeleteRow operation error. Error deleting row: %s", err)
+	}
+	return nil
+}
+
+// VerifySignature verifies the transaction signature and returns `true` if
+// correct and `false` otherwise. Verification is done with an X509/ECDSA
+// signature verifier.
+func (stub *ChaincodeStub) VerifySignature(certificate, signature, message []byte) (bool, error) {
+	// Instantiate a new SignatureVerifier
+	sv := ecdsa.NewX509ECDSASignatureVerifier()
+
+	// Verify the signature
+	return sv.Verify(certificate, signature, message)
+}
+
+// GetCallerCertificate returns caller certificate from the transaction's
+// security context.
+func (stub *ChaincodeStub) GetCallerCertificate() ([]byte, error) {
+	return stub.securityContext.CallerCert, nil
+}
+
+// GetCallerMetadata returns caller metadata
+func (stub *ChaincodeStub) GetCallerMetadata() ([]byte, error) {
+	return stub.securityContext.Metadata, nil
+}
+
+// GetBinding returns the transaction binding
+func (stub *ChaincodeStub) GetBinding() ([]byte, error) {
+	return stub.securityContext.Binding, nil
+}
+
+// GetPayload returns transaction payload, which is a `ChaincodeSpec` defined
+// in fabric/protos/chaincode.proto
+func (stub *ChaincodeStub) GetPayload() ([]byte, error) {
+	return stub.securityContext.Payload, nil
+}
+
+// GetTxTimestamp returns transaction created timestamp, which is currently
+// taken from the peer receiving the transaction. Note that this timestamp
+// may not be the same with the other peers' time.
+func (stub *ChaincodeStub) GetTxTimestamp() (*gp.Timestamp, error) {
+	return stub.securityContext.TxTimestamp, nil
+}
+
+// getTable fetches and unmarshals the table definition for tableName.
+// Returns ErrTableNotFound when no definition exists in state.
+func (stub *ChaincodeStub) getTable(tableName string) (*Table, error) {
+
+	tableName, err := getTableNameKey(tableName)
+	if err != nil {
+		return nil, err
+	}
+
+	tableBytes, err := stub.GetState(tableName)
+	// Check the error before the nil-bytes case: previously a failed GetState
+	// that returned nil bytes was misreported as ErrTableNotFound.
+	if err != nil {
+		return nil, fmt.Errorf("Error fetching table: %s", err)
+	}
+	if tableBytes == nil {
+		return nil, ErrTableNotFound
+	}
+	table := &Table{}
+	err = proto.Unmarshal(tableBytes, table)
+	if err != nil {
+		return nil, fmt.Errorf("Error unmarshalling table: %s", err)
+	}
+
+	return table, nil
+}
+
+// validateTableName checks that a table name is legal (non-empty).
+func validateTableName(name string) error {
+	if len(name) == 0 {
+		// Fixed typo in the user-facing message ("Inavlid" -> "Invalid").
+		return errors.New("Invalid table name. Table name must be 1 or more characters.")
+	}
+
+	return nil
+}
+
+// getTableNameKey derives the state key under which a table definition is
+// stored: the decimal length of the name followed by the name itself.
+func getTableNameKey(name string) (string, error) {
+	if err := validateTableName(name); err != nil {
+		return "", err
+	}
+	return strconv.Itoa(len(name)) + name, nil
+}
+
+// buildKeyString composes the state key for a row: the table-name key
+// followed by a "<len><value>" segment per key column. Column values are
+// rendered via strconv (ints/uints base-10, bools "true"/"false") and bytes
+// are used raw.
+func buildKeyString(tableName string, keys []Column) (string, error) {
+
+	var keyBuffer bytes.Buffer
+
+	tableNameKey, err := getTableNameKey(tableName)
+	if err != nil {
+		return "", err
+	}
+
+	keyBuffer.WriteString(tableNameKey)
+
+	for _, key := range keys {
+
+		var keyString string
+		switch key.Value.(type) {
+		case *Column_String_:
+			keyString = key.GetString_()
+		case *Column_Int32:
+			// b := make([]byte, 4)
+			// binary.LittleEndian.PutUint32(b, uint32(key.GetInt32()))
+			// keyBuffer.Write(b)
+			keyString = strconv.FormatInt(int64(key.GetInt32()), 10)
+		case *Column_Int64:
+			keyString = strconv.FormatInt(key.GetInt64(), 10)
+		case *Column_Uint32:
+			keyString = strconv.FormatUint(uint64(key.GetUint32()), 10)
+		case *Column_Uint64:
+			keyString = strconv.FormatUint(key.GetUint64(), 10)
+		case *Column_Bytes:
+			keyString = string(key.GetBytes())
+		case *Column_Bool:
+			keyString = strconv.FormatBool(key.GetBool())
+		}
+
+		// Length-prefix each segment so adjacent values cannot collide.
+		keyBuffer.WriteString(strconv.Itoa(len(keyString)))
+		keyBuffer.WriteString(keyString)
+	}
+
+	return keyBuffer.String(), nil
+}
+
+// getKeyAndVerifyRow checks that row matches the table's column definitions
+// (same column count, matching type at each position) and returns the columns
+// flagged as keys, in definition order.
+func getKeyAndVerifyRow(table Table, row Row) ([]Column, error) {
+
+	var keys []Column
+
+	if row.Columns == nil || len(row.Columns) != len(table.ColumnDefinitions) {
+		return keys, fmt.Errorf("Table '%s' defines %d columns, but row has %d columns.",
+			table.Name, len(table.ColumnDefinitions), len(row.Columns))
+	}
+
+	for i, column := range row.Columns {
+
+		// Check types
+		var expectedType bool
+		switch column.Value.(type) {
+		case *Column_String_:
+			expectedType = table.ColumnDefinitions[i].Type == ColumnDefinition_STRING
+		case *Column_Int32:
+			expectedType = table.ColumnDefinitions[i].Type == ColumnDefinition_INT32
+		case *Column_Int64:
+			expectedType = table.ColumnDefinitions[i].Type == ColumnDefinition_INT64
+		case *Column_Uint32:
+			expectedType = table.ColumnDefinitions[i].Type == ColumnDefinition_UINT32
+		case *Column_Uint64:
+			expectedType = table.ColumnDefinitions[i].Type == ColumnDefinition_UINT64
+		case *Column_Bytes:
+			expectedType = table.ColumnDefinitions[i].Type == ColumnDefinition_BYTES
+		case *Column_Bool:
+			expectedType = table.ColumnDefinitions[i].Type == ColumnDefinition_BOOL
+		default:
+			expectedType = false
+		}
+		if !expectedType {
+			return keys, fmt.Errorf("The type for table '%s', column '%s' is '%s', but the column in the row does not match.",
+				table.Name, table.ColumnDefinitions[i].Name, table.ColumnDefinitions[i].Type)
+		}
+
+		if table.ColumnDefinitions[i].Key {
+			keys = append(keys, *column)
+		}
+
+	}
+
+	return keys, nil
+}
+
+// isRowPrsent reports whether a row already exists in the state for the
+// given table name and key columns.
+// NOTE(review): the name is misspelled ("Prsent"); renaming requires
+// touching all call sites and is left for a follow-up change.
+func (stub *ChaincodeStub) isRowPrsent(tableName string, key []Column) (bool, error) {
+	keyString, err := buildKeyString(tableName, key)
+	if err != nil {
+		return false, err
+	}
+	rowBytes, err := stub.GetState(keyString)
+	if err != nil {
+		return false, fmt.Errorf("Error fetching row for key %s: %s", keyString, err)
+	}
+	// A non-nil value under the key means the row exists.
+	return rowBytes != nil, nil
+}
+
+// insertRowInternal inserts a new row into the specified table, or replaces
+// an existing row when update is true.
+// Returns -
+// true and no error if the row is successfully inserted.
+// false and no error if a row already exists for the given key.
+// false and a TableNotFoundError if the specified table name does not exist.
+// false and an error if there is an unexpected error condition.
+func (stub *ChaincodeStub) insertRowInternal(tableName string, row Row, update bool) (bool, error) {
+
+	table, err := stub.getTable(tableName)
+	if err != nil {
+		return false, err
+	}
+
+	// Validate the row against the table definition and extract its key columns.
+	key, err := getKeyAndVerifyRow(*table, row)
+	if err != nil {
+		return false, err
+	}
+
+	present, err := stub.isRowPrsent(tableName, key)
+	if err != nil {
+		return false, err
+	}
+	// Insert requires the row to be absent; update requires it to be present.
+	if (present && !update) || (!present && update) {
+		return false, nil
+	}
+
+	rowBytes, err := proto.Marshal(&row)
+	if err != nil {
+		return false, fmt.Errorf("Error marshalling row: %s", err)
+	}
+
+	keyString, err := buildKeyString(tableName, key)
+	if err != nil {
+		return false, err
+	}
+	err = stub.PutState(keyString, rowBytes)
+	if err != nil {
+		return false, fmt.Errorf("Error inserting row in table %s: %s", tableName, err)
+	}
+
+	return true, nil
+}
+
+// ------------- ChaincodeEvent API ----------------------
+
+// SetEvent saves the event to be sent when a transaction is made part of a
+// block. name is the event name and payload is the opaque event body. Only
+// the most recent call is kept — each call overwrites the previously stored
+// event. Always returns nil.
+func (stub *ChaincodeStub) SetEvent(name string, payload []byte) error {
+	stub.chaincodeEvent = &pb.ChaincodeEvent{EventName: name, Payload: payload}
+	return nil
+}
+
+// ------------- Logging Control and Chaincode Loggers ---------------
+
+// As independent programs, Go language chaincodes can use any logging
+// methodology they choose, from simple fmt.Printf() to os.Stdout, to
+// decorated logs created by the author's favorite logging package. The
+// chaincode "shim" interface, however, is defined by the Hyperledger fabric
+// and implements its own logging methodology. This methodology currently
+// includes severity-based logging control and a standard way of decorating
+// the logs.
+//
+// The facilities defined here allow a Go language chaincode to control the
+// logging level of its shim, and to create its own logs formatted
+// consistently with, and temporally interleaved with the shim logs without
+// any knowledge of the underlying implementation of the shim, and without any
+// other package requirements. The lack of package requirements is especially
+// important because even if the chaincode happened to explicitly use the same
+// logging package as the shim, unless the chaincode is physically included as
+// part of the hyperledger fabric source code tree it could actually end up
+// using a distinct binary instance of the logging package, with different
+// formats and severity levels than the binary package used by the shim.
+//
+// Another approach that might have been taken, and could potentially be taken
+// in the future, would be for the chaincode to supply a logging object for
+// the shim to use, rather than the other way around as implemented
+// here. There would be some complexities associated with that approach, so
+// for the moment we have chosen the simpler implementation below. The shim
+// provides one or more abstract logging objects for the chaincode to use via
+// the NewLogger() API, and allows the chaincode to control the severity level
+// of shim logs using the SetLoggingLevel() API.
+
+// LoggingLevel is an enumerated type of severity levels that control
+// chaincode logging. Values convert directly to the underlying logging
+// package's levels via logging.Level().
+type LoggingLevel logging.Level
+
+// These constants comprise the LoggingLevel enumeration
+const (
+	LogDebug    = LoggingLevel(logging.DEBUG)
+	LogInfo     = LoggingLevel(logging.INFO)
+	LogNotice   = LoggingLevel(logging.NOTICE)
+	LogWarning  = LoggingLevel(logging.WARNING)
+	LogError    = LoggingLevel(logging.ERROR)
+	LogCritical = LoggingLevel(logging.CRITICAL)
+)
+
+// shimLoggingLevel tracks the shim's current log level; updated by
+// SetLoggingLevel().
+var shimLoggingLevel = LogDebug // Necessary for correct initialization; See Start()
+
+// SetLoggingLevel allows a Go language chaincode to set the logging level of
+// its shim. The level is recorded in shimLoggingLevel and applied to the
+// "shim" logging module.
+func SetLoggingLevel(level LoggingLevel) {
+	shimLoggingLevel = level
+	logging.SetLevel(logging.Level(level), "shim")
+}
+
+// LogLevel converts a case-insensitive string chosen from CRITICAL, ERROR,
+// WARNING, NOTICE, INFO or DEBUG into an element of the LoggingLevel
+// type. In the event of errors the level returned is LogError.
+func LogLevel(levelString string) (LoggingLevel, error) {
+	parsed, err := logging.LogLevel(levelString)
+	if err != nil {
+		// Unrecognized names fall back to LogError; the parse error is
+		// passed through so callers can distinguish the fallback.
+		return LogError, err
+	}
+	return LoggingLevel(parsed), nil
+}
+
+// ------------- Chaincode Loggers ---------------
+
+// ChaincodeLogger is an abstraction of a logging object for use by
+// chaincodes. These objects are created by the NewLogger API.
+type ChaincodeLogger struct {
+	logger *logging.Logger // underlying logger, named by NewLogger
+}
+
+// NewLogger allows a Go language chaincode to create one or more logging
+// objects whose logs will be formatted consistently with, and temporally
+// interleaved with the logs created by the shim interface. The logs created
+// by this object can be distinguished from shim logs by the name provided,
+// which will appear in the logs.
+func NewLogger(name string) *ChaincodeLogger {
+	logger := logging.MustGetLogger(name)
+	return &ChaincodeLogger{logger: logger}
+}
+
+// SetLevel sets the logging level for a chaincode logger. Note that currently
+// the levels are actually controlled by the name given when the logger is
+// created, so loggers should be given unique names other than "shim".
+func (c *ChaincodeLogger) SetLevel(level LoggingLevel) {
+	logging.SetLevel(logging.Level(level), c.logger.Module)
+}
+
+// IsEnabledFor returns true if the logger is enabled to create logs at the
+// given logging level.
+func (c *ChaincodeLogger) IsEnabledFor(level LoggingLevel) bool {
+	return c.logger.IsEnabledFor(logging.Level(level))
+}
+
+// Debug logs will only appear if the ChaincodeLogger LoggingLevel is set to
+// LogDebug.
+func (c *ChaincodeLogger) Debug(args ...interface{}) {
+	c.logger.Debug(args...)
+}
+
+// Info logs will appear if the ChaincodeLogger LoggingLevel is set to
+// LogInfo or LogDebug.
+func (c *ChaincodeLogger) Info(args ...interface{}) {
+	c.logger.Info(args...)
+}
+
+// Notice logs will appear if the ChaincodeLogger LoggingLevel is set to
+// LogNotice, LogInfo or LogDebug.
+func (c *ChaincodeLogger) Notice(args ...interface{}) {
+	c.logger.Notice(args...)
+}
+
+// Warning logs will appear if the ChaincodeLogger LoggingLevel is set to
+// LogWarning, LogNotice, LogInfo or LogDebug.
+func (c *ChaincodeLogger) Warning(args ...interface{}) {
+	c.logger.Warning(args...)
+}
+
+// Error logs will appear if the ChaincodeLogger LoggingLevel is set to
+// LogError, LogWarning, LogNotice, LogInfo or LogDebug.
+func (c *ChaincodeLogger) Error(args ...interface{}) {
+	c.logger.Error(args...)
+}
+
+// Critical logs always appear; they cannot be disabled.
+func (c *ChaincodeLogger) Critical(args ...interface{}) {
+	c.logger.Critical(args...)
+}
+
+// Debugf logs will only appear if the ChaincodeLogger LoggingLevel is set to
+// LogDebug. The format string follows fmt.Printf conventions.
+func (c *ChaincodeLogger) Debugf(format string, args ...interface{}) {
+	c.logger.Debugf(format, args...)
+}
+
+// Infof logs will appear if the ChaincodeLogger LoggingLevel is set to
+// LogInfo or LogDebug.
+func (c *ChaincodeLogger) Infof(format string, args ...interface{}) {
+	c.logger.Infof(format, args...)
+}
+
+// Noticef logs will appear if the ChaincodeLogger LoggingLevel is set to
+// LogNotice, LogInfo or LogDebug.
+func (c *ChaincodeLogger) Noticef(format string, args ...interface{}) {
+	c.logger.Noticef(format, args...)
+}
+
+// Warningf logs will appear if the ChaincodeLogger LoggingLevel is set to
+// LogWarning, LogNotice, LogInfo or LogDebug.
+func (c *ChaincodeLogger) Warningf(format string, args ...interface{}) {
+	c.logger.Warningf(format, args...)
+}
+
+// Errorf logs will appear if the ChaincodeLogger LoggingLevel is set to
+// LogError, LogWarning, LogNotice, LogInfo or LogDebug.
+func (c *ChaincodeLogger) Errorf(format string, args ...interface{}) {
+	c.logger.Errorf(format, args...)
+}
+
+// Criticalf logs always appear; they cannot be disabled.
+func (c *ChaincodeLogger) Criticalf(format string, args ...interface{}) {
+	c.logger.Criticalf(format, args...)
+}
diff --git a/core/chaincode/shim/chaincode.pb.go b/core/chaincode/shim/chaincode.pb.go
new file mode 100644
index 00000000000..ad544a3951a
--- /dev/null
+++ b/core/chaincode/shim/chaincode.pb.go
@@ -0,0 +1,318 @@
+// Code generated by protoc-gen-go.
+// source: chaincode/shim/chaincode.proto
+// DO NOT EDIT!
+
+/*
+Package shim is a generated protocol buffer package.
+
+It is generated from these files:
+ chaincode/shim/chaincode.proto
+
+It has these top-level messages:
+ ColumnDefinition
+ Table
+ Column
+ Row
+*/
+package shim
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// ColumnDefinition_Type enumerates the value types a table column may hold.
+// NOTE(review): generated by protoc-gen-go from chaincode.proto — do not
+// hand-edit; regenerate from the .proto instead.
+type ColumnDefinition_Type int32
+
+const (
+	ColumnDefinition_STRING ColumnDefinition_Type = 0
+	ColumnDefinition_INT32  ColumnDefinition_Type = 1
+	ColumnDefinition_INT64  ColumnDefinition_Type = 2
+	ColumnDefinition_UINT32 ColumnDefinition_Type = 3
+	ColumnDefinition_UINT64 ColumnDefinition_Type = 4
+	ColumnDefinition_BYTES  ColumnDefinition_Type = 5
+	ColumnDefinition_BOOL   ColumnDefinition_Type = 6
+)
+
+// ColumnDefinition_Type_name maps enum values to their proto names.
+var ColumnDefinition_Type_name = map[int32]string{
+	0: "STRING",
+	1: "INT32",
+	2: "INT64",
+	3: "UINT32",
+	4: "UINT64",
+	5: "BYTES",
+	6: "BOOL",
+}
+
+// ColumnDefinition_Type_value maps proto names to their enum values.
+var ColumnDefinition_Type_value = map[string]int32{
+	"STRING": 0,
+	"INT32":  1,
+	"INT64":  2,
+	"UINT32": 3,
+	"UINT64": 4,
+	"BYTES":  5,
+	"BOOL":   6,
+}
+
+func (x ColumnDefinition_Type) String() string {
+	return proto.EnumName(ColumnDefinition_Type_name, int32(x))
+}
+
+// ColumnDefinition describes one column of a Table: its name, value type,
+// and whether it participates in the table's key. (Generated code.)
+type ColumnDefinition struct {
+	Name string                `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Type ColumnDefinition_Type `protobuf:"varint,2,opt,name=type,enum=shim.ColumnDefinition_Type" json:"type,omitempty"`
+	Key  bool                  `protobuf:"varint,3,opt,name=key" json:"key,omitempty"`
+}
+
+func (m *ColumnDefinition) Reset()         { *m = ColumnDefinition{} }
+func (m *ColumnDefinition) String() string { return proto.CompactTextString(m) }
+func (*ColumnDefinition) ProtoMessage()    {}
+
+// Table is a named collection of column definitions. (Generated code.)
+type Table struct {
+	Name              string              `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	ColumnDefinitions []*ColumnDefinition `protobuf:"bytes,2,rep,name=columnDefinitions" json:"columnDefinitions,omitempty"`
+}
+
+func (m *Table) Reset()         { *m = Table{} }
+func (m *Table) String() string { return proto.CompactTextString(m) }
+func (*Table) ProtoMessage()    {}
+
+// GetColumnDefinitions returns the table's column definitions; nil-receiver safe.
+func (m *Table) GetColumnDefinitions() []*ColumnDefinition {
+	if m != nil {
+		return m.ColumnDefinitions
+	}
+	return nil
+}
+
+// Column holds a single typed value; exactly one member of the Value oneof
+// is set at a time. (Generated code.)
+type Column struct {
+	// Types that are valid to be assigned to Value:
+	//	*Column_String_
+	//	*Column_Int32
+	//	*Column_Int64
+	//	*Column_Uint32
+	//	*Column_Uint64
+	//	*Column_Bytes
+	//	*Column_Bool
+	Value isColumn_Value `protobuf_oneof:"value"`
+}
+
+func (m *Column) Reset()         { *m = Column{} }
+func (m *Column) String() string { return proto.CompactTextString(m) }
+func (*Column) ProtoMessage()    {}
+
+// isColumn_Value is the interface implemented by all oneof wrapper types.
+type isColumn_Value interface {
+	isColumn_Value()
+}
+
+type Column_String_ struct {
+	String_ string `protobuf:"bytes,1,opt,name=string,oneof"`
+}
+type Column_Int32 struct {
+	Int32 int32 `protobuf:"varint,2,opt,name=int32,oneof"`
+}
+type Column_Int64 struct {
+	Int64 int64 `protobuf:"varint,3,opt,name=int64,oneof"`
+}
+type Column_Uint32 struct {
+	Uint32 uint32 `protobuf:"varint,4,opt,name=uint32,oneof"`
+}
+type Column_Uint64 struct {
+	Uint64 uint64 `protobuf:"varint,5,opt,name=uint64,oneof"`
+}
+type Column_Bytes struct {
+	Bytes []byte `protobuf:"bytes,6,opt,name=bytes,proto3,oneof"`
+}
+type Column_Bool struct {
+	Bool bool `protobuf:"varint,7,opt,name=bool,oneof"`
+}
+
+func (*Column_String_) isColumn_Value() {}
+func (*Column_Int32) isColumn_Value()   {}
+func (*Column_Int64) isColumn_Value()   {}
+func (*Column_Uint32) isColumn_Value()  {}
+func (*Column_Uint64) isColumn_Value()  {}
+func (*Column_Bytes) isColumn_Value()   {}
+func (*Column_Bool) isColumn_Value()    {}
+
+// GetValue returns the oneof wrapper currently set, or nil.
+func (m *Column) GetValue() isColumn_Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+// The Get* accessors below return the corresponding oneof member when it is
+// the one set, or the zero value of that type otherwise.
+
+func (m *Column) GetString_() string {
+	if x, ok := m.GetValue().(*Column_String_); ok {
+		return x.String_
+	}
+	return ""
+}
+
+func (m *Column) GetInt32() int32 {
+	if x, ok := m.GetValue().(*Column_Int32); ok {
+		return x.Int32
+	}
+	return 0
+}
+
+func (m *Column) GetInt64() int64 {
+	if x, ok := m.GetValue().(*Column_Int64); ok {
+		return x.Int64
+	}
+	return 0
+}
+
+func (m *Column) GetUint32() uint32 {
+	if x, ok := m.GetValue().(*Column_Uint32); ok {
+		return x.Uint32
+	}
+	return 0
+}
+
+func (m *Column) GetUint64() uint64 {
+	if x, ok := m.GetValue().(*Column_Uint64); ok {
+		return x.Uint64
+	}
+	return 0
+}
+
+func (m *Column) GetBytes() []byte {
+	if x, ok := m.GetValue().(*Column_Bytes); ok {
+		return x.Bytes
+	}
+	return nil
+}
+
+func (m *Column) GetBool() bool {
+	if x, ok := m.GetValue().(*Column_Bool); ok {
+		return x.Bool
+	}
+	return false
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Column) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) {
+	return _Column_OneofMarshaler, _Column_OneofUnmarshaler, []interface{}{
+		(*Column_String_)(nil),
+		(*Column_Int32)(nil),
+		(*Column_Int64)(nil),
+		(*Column_Uint32)(nil),
+		(*Column_Uint64)(nil),
+		(*Column_Bytes)(nil),
+		(*Column_Bool)(nil),
+	}
+}
+
+// _Column_OneofMarshaler encodes whichever oneof member of Column.Value is
+// set onto the wire buffer. (Generated code.)
+func _Column_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*Column)
+	// value
+	switch x := m.Value.(type) {
+	case *Column_String_:
+		b.EncodeVarint(1<<3 | proto.WireBytes)
+		b.EncodeStringBytes(x.String_)
+	case *Column_Int32:
+		b.EncodeVarint(2<<3 | proto.WireVarint)
+		b.EncodeVarint(uint64(x.Int32))
+	case *Column_Int64:
+		b.EncodeVarint(3<<3 | proto.WireVarint)
+		b.EncodeVarint(uint64(x.Int64))
+	case *Column_Uint32:
+		b.EncodeVarint(4<<3 | proto.WireVarint)
+		b.EncodeVarint(uint64(x.Uint32))
+	case *Column_Uint64:
+		b.EncodeVarint(5<<3 | proto.WireVarint)
+		b.EncodeVarint(uint64(x.Uint64))
+	case *Column_Bytes:
+		b.EncodeVarint(6<<3 | proto.WireBytes)
+		b.EncodeRawBytes(x.Bytes)
+	case *Column_Bool:
+		t := uint64(0)
+		if x.Bool {
+			t = 1
+		}
+		b.EncodeVarint(7<<3 | proto.WireVarint)
+		b.EncodeVarint(t)
+	case nil:
+	default:
+		return fmt.Errorf("Column.Value has unexpected type %T", x)
+	}
+	return nil
+}
+
+// _Column_OneofUnmarshaler decodes a wire field into the matching oneof
+// member of Column.Value; returns (false, nil) for unrecognized tags so the
+// proto package can skip them. (Generated code.)
+func _Column_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*Column)
+	switch tag {
+	case 1: // value.string
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeStringBytes()
+		m.Value = &Column_String_{x}
+		return true, err
+	case 2: // value.int32
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.Value = &Column_Int32{int32(x)}
+		return true, err
+	case 3: // value.int64
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.Value = &Column_Int64{int64(x)}
+		return true, err
+	case 4: // value.uint32
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.Value = &Column_Uint32{uint32(x)}
+		return true, err
+	case 5: // value.uint64
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.Value = &Column_Uint64{x}
+		return true, err
+	case 6: // value.bytes
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeRawBytes(true)
+		m.Value = &Column_Bytes{x}
+		return true, err
+	case 7: // value.bool
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.Value = &Column_Bool{x != 0}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+// Row is an ordered list of column values. (Generated code.)
+type Row struct {
+	Columns []*Column `protobuf:"bytes,1,rep,name=columns" json:"columns,omitempty"`
+}
+
+func (m *Row) Reset()         { *m = Row{} }
+func (m *Row) String() string { return proto.CompactTextString(m) }
+func (*Row) ProtoMessage()    {}
+
+// GetColumns returns the row's columns; nil-receiver safe.
+func (m *Row) GetColumns() []*Column {
+	if m != nil {
+		return m.Columns
+	}
+	return nil
+}
+
+// init registers the enum with the proto package at load time.
+func init() {
+	proto.RegisterEnum("shim.ColumnDefinition_Type", ColumnDefinition_Type_name, ColumnDefinition_Type_value)
+}
diff --git a/core/chaincode/shim/chaincode.proto b/core/chaincode/shim/chaincode.proto
new file mode 100644
index 00000000000..d4d530c769d
--- /dev/null
+++ b/core/chaincode/shim/chaincode.proto
@@ -0,0 +1,55 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+syntax = "proto3";
+
+package shim;
+
+// ColumnDefinition describes a single table column.
+message ColumnDefinition {
+    string name = 1;
+    // Type enumerates the value types a column may hold.
+    enum Type {
+        STRING = 0;
+        INT32 = 1;
+        INT64 = 2;
+        UINT32 = 3;
+        UINT64 = 4;
+        BYTES = 5;
+        BOOL = 6;
+    }
+    Type type = 2;
+    // key marks this column as part of the table's key.
+    bool key = 3;
+}
+
+// Table is a named, ordered collection of column definitions.
+message Table {
+    string name = 1;
+    repeated ColumnDefinition columnDefinitions = 2;
+}
+
+// Column holds one typed value; exactly one oneof member is set.
+message Column {
+    oneof value {
+        string string = 1;
+        int32 int32 = 2;
+        int64 int64 = 3;
+        uint32 uint32 = 4;
+        uint64 uint64 = 5;
+        bytes bytes = 6;
+        bool bool = 7;
+    }
+}
+
+// Row is an ordered list of column values.
+message Row {
+    repeated Column columns = 1;
+}
diff --git a/core/chaincode/shim/crypto/attr/attr_support.go b/core/chaincode/shim/crypto/attr/attr_support.go
new file mode 100644
index 00000000000..35883c66001
--- /dev/null
+++ b/core/chaincode/shim/crypto/attr/attr_support.go
@@ -0,0 +1,214 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package attr
+
+import (
+ "bytes"
+ "crypto/x509"
+ "errors"
+
+ "github.com/hyperledger/fabric/core/crypto/attributes"
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+)
+
+//Attribute defines a name, value pair to be verified.
+type Attribute struct {
+	Name  string // attribute name as stored in the transaction certificate
+	Value []byte // expected attribute value
+}
+
+// chaincodeHolder is the struct that holds the certificate and the metadata.
+// An implementation is ChaincodeStub.
+type chaincodeHolder interface {
+	// GetCallerCertificate returns caller certificate
+	GetCallerCertificate() ([]byte, error)
+
+	// GetCallerMetadata returns caller metadata
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		GetCallerMetadata() ([]byte, error)
+	*/
+}
+
+//AttributesHandler is an entity that can be used to both verify and read attributes.
+// The functions declared can be used to access the attributes stored in the transaction certificates from the application layer. Can be used directly from the ChaincodeStub API but
+// if you need multiple accesses, creating a handler is better:
+// Multiple accesses
+// If multiple calls to the functions above are required, a best practice is to create an AttributesHandler instead of calling the functions multiple times, this practice will avoid creating a new AttributesHandler for each of these calls thus eliminating an unnecessary overhead.
+// Example:
+//
+//    AttributesHandler, err := ac.NewAttributesHandlerImpl(stub)
+//    if err != nil {
+//        return false, err
+//    }
+//    AttributesHandler.VerifyAttribute(attributeName, attributeValue)
+//    ... you can make other verifications and/or read attribute values by using the AttributesHandler
+type AttributesHandler interface {
+
+	//VerifyAttributes does the same as VerifyAttribute but it checks for a list of attributes and their respective values instead of a single attribute/value pair
+	// Example:
+	//    containsAttrs, error:= handler.VerifyAttributes(&ac.Attribute{"position", "Software Engineer"}, &ac.Attribute{"company", "ACompany"})
+	VerifyAttributes(attrs ...*Attribute) (bool, error)
+
+	//VerifyAttribute is used to verify if the transaction certificate has an attribute with name *attributeName* and value *attributeValue* which are the input parameters received by this function.
+	//Example:
+	//    containsAttr, error := handler.VerifyAttribute("position", "Software Engineer")
+	VerifyAttribute(attributeName string, attributeValue []byte) (bool, error)
+
+	//GetValue is used to read a specific attribute from the transaction certificate, *attributeName* is passed as input parameter to this function.
+	// Example:
+	//    attrValue,error:=handler.GetValue("position")
+	GetValue(attributeName string) ([]byte, error)
+}
+
+//AttributesHandlerImpl is an implementation of AttributesHandler interface.
+type AttributesHandlerImpl struct {
+	cert      *x509.Certificate // parsed caller transaction certificate
+	cache     map[string][]byte // attribute values already read/decrypted
+	keys      map[string][]byte // per-attribute decryption keys (currently empty; see TODO in NewAttributesHandlerImpl)
+	header    map[string]int    // lazily-parsed attribute header (name -> position); nil until readHeader runs
+	encrypted bool              // whether attribute values in the cert are encrypted, per the header
+}
+
+// chaincodeHolderImpl is a minimal chaincodeHolder backed by a raw certificate.
+type chaincodeHolderImpl struct {
+	Certificate []byte
+}
+
+// GetCallerCertificate returns caller certificate
+func (holderImpl *chaincodeHolderImpl) GetCallerCertificate() ([]byte, error) {
+	return holderImpl.Certificate, nil
+}
+
+//GetValueFrom returns the value of 'attributeName' from a raw DER cert,
+// constructing a one-shot handler internally.
+func GetValueFrom(attributeName string, cert []byte) ([]byte, error) {
+	handler, err := NewAttributesHandlerImpl(&chaincodeHolderImpl{Certificate: cert})
+	if err != nil {
+		return nil, err
+	}
+	return handler.GetValue(attributeName)
+}
+
+//NewAttributesHandlerImpl creates a new AttributesHandlerImpl from a pb.ChaincodeSecurityContext object.
+// It fetches the caller's DER certificate from the holder, parses it to
+// x509, and returns a handler with empty key and value caches. Returns an
+// error if the certificate is nil or cannot be parsed.
+func NewAttributesHandlerImpl(holder chaincodeHolder) (*AttributesHandlerImpl, error) {
+	// Getting certificate
+	certRaw, err := holder.GetCallerCertificate()
+	if err != nil {
+		return nil, err
+	}
+	if certRaw == nil {
+		return nil, errors.New("The certificate can't be nil.")
+	}
+	var tcert *x509.Certificate
+	tcert, err = primitives.DERToX509Certificate(certRaw)
+	if err != nil {
+		return nil, err
+	}
+
+	// Decryption keys are currently left empty; see the TODO below.
+	keys := make(map[string][]byte)
+
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+
+		//Getting Attributes Metadata from security context.
+		var attrsMetadata *attributespb.AttributesMetadata
+		var rawMetadata []byte
+		rawMetadata, err = holder.GetCallerMetadata()
+		if err != nil {
+			return nil, err
+		}
+
+		if rawMetadata != nil {
+			attrsMetadata, err = attributes.GetAttributesMetadata(rawMetadata)
+			if err == nil {
+				for _, entry := range attrsMetadata.Entries {
+					keys[entry.AttributeName] = entry.AttributeKey
+				}
+			}
+		}*/
+
+	cache := make(map[string][]byte)
+	// header is nil and encrypted is false until readHeader is first called.
+	return &AttributesHandlerImpl{tcert, cache, keys, nil, false}, nil
+}
+
+// readHeader lazily parses the attribute header from the certificate,
+// caching both the name->position map and the encrypted flag on the handler
+// so subsequent calls avoid re-parsing.
+func (attributesHandler *AttributesHandlerImpl) readHeader() (map[string]int, bool, error) {
+	if attributesHandler.header == nil {
+		header, encrypted, err := attributes.ReadAttributeHeader(attributesHandler.cert, attributesHandler.keys[attributes.HeaderAttributeName])
+		if err != nil {
+			return nil, false, err
+		}
+		attributesHandler.header = header
+		attributesHandler.encrypted = encrypted
+	}
+	return attributesHandler.header, attributesHandler.encrypted, nil
+}
+
+//GetValue is used to read a specific attribute from the transaction certificate, *attributeName* is passed as input parameter to this function.
+// The value is read from the certificate at the position given by the
+// attribute header, decrypted if the header marks values as encrypted, and
+// cached so repeat reads of the same attribute are free.
+// Example:
+//    attrValue,error:=handler.GetValue("position")
+func (attributesHandler *AttributesHandlerImpl) GetValue(attributeName string) ([]byte, error) {
+	// Serve from cache when this attribute was already read.
+	if attributesHandler.cache[attributeName] != nil {
+		return attributesHandler.cache[attributeName], nil
+	}
+	header, encrypted, err := attributesHandler.readHeader()
+	if err != nil {
+		return nil, err
+	}
+	value, err := attributes.ReadTCertAttributeByPosition(attributesHandler.cert, header[attributeName])
+	if err != nil {
+		return nil, errors.New("Error reading attribute value '" + err.Error() + "'")
+	}
+
+	if encrypted {
+		// Encrypted values require a per-attribute key; without one we fail
+		// rather than return ciphertext.
+		if attributesHandler.keys[attributeName] == nil {
+			return nil, errors.New("Cannot find decryption key for attribute")
+		}
+
+		value, err = attributes.DecryptAttributeValue(attributesHandler.keys[attributeName], value)
+		if err != nil {
+			return nil, errors.New("Error decrypting value '" + err.Error() + "'")
+		}
+	}
+	attributesHandler.cache[attributeName] = value
+	return value, nil
+}
+
+//VerifyAttribute is used to verify if the transaction certificate has an attribute with name *attributeName* and value *attributeValue* which are the input parameters received by this function.
+// Returns (false, err) if the attribute cannot be read; otherwise reports
+// whether the stored value is byte-equal to attributeValue.
+// Example:
+//    containsAttr, error := handler.VerifyAttribute("position", "Software Engineer")
+func (attributesHandler *AttributesHandlerImpl) VerifyAttribute(attributeName string, attributeValue []byte) (bool, error) {
+	valueHash, err := attributesHandler.GetValue(attributeName)
+	if err != nil {
+		return false, err
+	}
+	// bytes.Equal is the idiomatic (and go vet-clean) form of
+	// bytes.Compare(a, b) == 0.
+	return bytes.Equal(valueHash, attributeValue), nil
+}
+
+//VerifyAttributes does the same as VerifyAttribute but it checks for a list of attributes and their respective values instead of a single attribute/value pair
+// It short-circuits: the first read error or mismatching attribute stops the
+// scan. All pairs must match for the result to be true.
+// Example:
+//    containsAttrs, error:= handler.VerifyAttributes(&ac.Attribute{"position", "Software Engineer"}, &ac.Attribute{"company", "ACompany"})
+func (attributesHandler *AttributesHandlerImpl) VerifyAttributes(attrs ...*Attribute) (bool, error) {
+	for _, attr := range attrs {
+		matches, err := attributesHandler.VerifyAttribute(attr.Name, attr.Value)
+		if err != nil {
+			return false, err
+		}
+		if !matches {
+			return false, nil
+		}
+	}
+	return true, nil
+}
diff --git a/core/chaincode/shim/crypto/attr/attr_support_test.go b/core/chaincode/shim/crypto/attr/attr_support_test.go
new file mode 100644
index 00000000000..53972c4f6bd
--- /dev/null
+++ b/core/chaincode/shim/crypto/attr/attr_support_test.go
@@ -0,0 +1,681 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package attr
+
+import (
+ "bytes"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "io/ioutil"
+ "testing"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+)
+
+var (
+ attributeNames = []string{"company", "position"}
+)
+
+// chaincodeStubMock is a minimal stand-in for the chaincode stub used by the
+// attributes handler in tests: it only serves the caller's TCert bytes.
+type chaincodeStubMock struct {
+	callerCert []byte
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		metadata []byte
+	*/
+}
+
+// GetCallerCertificate returns caller certificate
+func (shim *chaincodeStubMock) GetCallerCertificate() ([]byte, error) {
+	return shim.callerCert, nil
+}
+
+/*
+ TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+// GetCallerMetadata returns caller metadata
+func (shim *chaincodeStubMock) GetCallerMetadata() ([]byte, error) {
+ return shim.metadata, nil
+}
+*/
+
+// certErrorMock is a stub whose GetCallerCertificate always fails, used to
+// exercise the handler's certificate-retrieval error path.
+type certErrorMock struct {
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		metadata []byte
+	*/
+}
+
+// GetCallerCertificate returns caller certificate
+func (shim *certErrorMock) GetCallerCertificate() ([]byte, error) {
+	return nil, errors.New("GetCallerCertificate error")
+}
+
+/*
+ TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+// GetCallerMetadata returns caller metadata
+func (shim *certErrorMock) GetCallerMetadata() ([]byte, error) {
+ return shim.metadata, nil
+}*/
+
+// metadataErrorMock serves a caller certificate normally; its metadata-error
+// behavior is pending the ##attributes-keys-pending redesign (see TODO below).
+type metadataErrorMock struct {
+	callerCert []byte
+}
+
+// GetCallerCertificate returns caller certificate
+func (shim *metadataErrorMock) GetCallerCertificate() ([]byte, error) {
+	return shim.callerCert, nil
+}
+
+/*
+ TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+// GetCallerMetadata returns caller metadata
+func (shim *metadataErrorMock) GetCallerMetadata() ([]byte, error) {
+ return nil, errors.New("GetCallerCertificate error")
+}*/
+
+// TestVerifyAttribute checks that a cleartext attribute embedded in the TCert
+// verifies against its expected value.
+func TestVerifyAttribute(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		tcert, prek0, err := loadTCertAndPreK0() */
+	tcert, err := loadTCertClear()
+	if err != nil {
+		t.Error(err)
+	}
+	tcertder := tcert.Raw
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		metadata := []byte{32, 64}
+
+		attributeMetadata, err := attributes.CreateAttributesMetadata(tcertder, metadata, prek0, attributeNames)
+		if err != nil {
+			t.Error(err)
+		}
+		stub := &chaincodeStubMock{callerCert: tcertder, metadata: attributeMetadata}*/
+	stub := &chaincodeStubMock{callerCert: tcertder}
+	handler, err := NewAttributesHandlerImpl(stub)
+	if err != nil {
+		t.Error(err)
+	}
+
+	isOk, err := handler.VerifyAttribute("position", []byte("Software Engineer"))
+	if err != nil {
+		t.Error(err)
+	}
+
+	if !isOk {
+		t.Fatal("Attribute not verified.")
+	}
+}
+
+/*
+ TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+func TestVerifyAttribute_InvalidAttributeMetadata(t *testing.T) {
+ primitives.SetSecurityLevel("SHA3", 256)
+
+ tcert, _, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+
+ tcertder := tcert.Raw
+
+ attributeMetadata := []byte{123, 22, 34, 56, 78, 44}
+
+ stub := &chaincodeStubMock{callerCert: tcertder, metadata: attributeMetadata}
+ handler, err := NewAttributesHandlerImpl(stub)
+ if err != nil {
+ t.Error(err)
+ }
+ keySize := len(handler.keys)
+ if keySize != 0 {
+ t.Errorf("Test failed expected [%v] keys but found [%v]", keySize, 0)
+ }
+}*/
+
+// TestNewAttributesHandlerImpl_CertificateError checks that the constructor
+// propagates a failure from GetCallerCertificate (certErrorMock always fails).
+func TestNewAttributesHandlerImpl_CertificateError(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		tcert, prek0, err := loadTCertAndPreK0()
+		tcert, err := loadTCertClear()
+		if err != nil {
+			t.Error(err)
+		}
+		tcertder := tcert.Raw
+		metadata := []byte{32, 64}
+		attributeMetadata, err := attributes.CreateAttributesMetadata(tcertder, metadata, prek0, attributeNames)
+		if err != nil {
+			t.Error(err)
+		}
+		stub := &certErrorMock{metadata: attributeMetadata}*/
+	stub := &certErrorMock{}
+	_, err := NewAttributesHandlerImpl(stub)
+	if err == nil {
+		t.Fatal("Error shouldn't be nil")
+	}
+}
+
+/*
+ TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+func TestNewAttributesHandlerImpl_MetadataError(t *testing.T) {
+ primitives.SetSecurityLevel("SHA3", 256)
+
+ tcert, _, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+ tcertder := tcert.Raw
+ if err != nil {
+ t.Error(err)
+ }
+ stub := &metadataErrorMock{callerCert: tcertder}
+ _, err = NewAttributesHandlerImpl(stub)
+ if err == nil {
+ t.Fatal("Error shouldn't be nil")
+ }
+}*/
+
+// TestNewAttributesHandlerImpl_InvalidCertificate checks that a corrupted DER
+// certificate is rejected by NewAttributesHandlerImpl.
+func TestNewAttributesHandlerImpl_InvalidCertificate(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+	tcert, _, err := loadTCertAndPreK0()
+	if err != nil {
+		t.Error(err)
+	}
+	tcertder := tcert.Raw
+	// NOTE(review): this second err check is redundant — err was already
+	// handled immediately above and cannot have changed.
+	if err != nil {
+		t.Error(err)
+	}
+	// Corrupt the first DER byte so certificate parsing fails.
+	tcertder[0] = tcertder[0] + 1
+	stub := &metadataErrorMock{callerCert: tcertder}
+	_, err = NewAttributesHandlerImpl(stub)
+	if err == nil {
+		t.Fatal("Error shouldn't be nil")
+	}
+}
+
+// TestNewAttributesHandlerImpl_NullCertificate checks that a nil caller
+// certificate is rejected by NewAttributesHandlerImpl.
+func TestNewAttributesHandlerImpl_NullCertificate(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		tcert, prek0, err := loadTCertAndPreK0()
+		tcert, err := loadTCertClear()
+		if err != nil {
+			t.Error(err)
+		}
+		metadata := []byte{32, 64}
+		tcertder := tcert.Raw
+		attributeMetadata, err := attributes.CreateAttributesMetadata(tcertder, metadata, prek0, attributeNames)
+		if err != nil {
+			t.Error(err)
+		}
+		stub := &chaincodeStubMock{callerCert: nil, metadata: attributeMetadata}*/
+	stub := &chaincodeStubMock{callerCert: nil}
+	_, err := NewAttributesHandlerImpl(stub)
+	if err == nil {
+		t.Fatal("Error can't be nil.")
+	}
+}
+
+/*
+ TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+func TestNewAttributesHandlerImpl_NullMetadata(t *testing.T) {
+ primitives.SetSecurityLevel("SHA3", 256)
+
+ tcert, _, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+ tcertder := tcert.Raw
+ if err != nil {
+ t.Error(err)
+ }
+ stub := &chaincodeStubMock{callerCert: tcertder, metadata: nil}
+ handler, err := NewAttributesHandlerImpl(stub)
+ if err != nil {
+ t.Error(err)
+ }
+ keySize := len(handler.keys)
+ if keySize != 0 {
+ t.Errorf("Test failed expected [%v] keys but found [%v]", keySize, 0)
+ }
+}*/
+
+// TestVerifyAttributes checks the variadic VerifyAttributes API with a single
+// matching attribute/value pair.
+func TestVerifyAttributes(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		tcert, prek0, err := loadTCertAndPreK0() */
+	tcert, err := loadTCertClear()
+	if err != nil {
+		t.Error(err)
+	}
+	tcertder := tcert.Raw
+
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		metadata := []byte{32,64}
+		attributeMetadata, err := attributes.CreateAttributesMetadata(tcertder, metadata, prek0, attributeNames)
+		if err != nil {
+			t.Error(err)
+		}
+		stub := &chaincodeStubMock{callerCert: tcertder, metadata: attributeMetadata} */
+	stub := &chaincodeStubMock{callerCert: tcertder}
+	handler, err := NewAttributesHandlerImpl(stub)
+	if err != nil {
+		t.Error(err)
+	}
+
+	isOk, err := handler.VerifyAttributes(&Attribute{Name: "position", Value: []byte("Software Engineer")})
+	if err != nil {
+		t.Error(err)
+	}
+
+	if !isOk {
+		t.Fatal("Attribute not verified.")
+	}
+}
+
+// TestVerifyAttributes_Invalid checks that verification fails when one pair in
+// the list does not match (second "position" value is wrong).
+func TestVerifyAttributes_Invalid(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		tcert, prek0, err := loadTCertAndPreK0() */
+	tcert, err := loadTCertClear()
+	if err != nil {
+		t.Error(err)
+	}
+
+	tcertder := tcert.Raw
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		metadata := []byte{32,64}
+		attributeMetadata, err := attributes.CreateAttributesMetadata(tcertder, metadata, prek0, attributeNames)
+		if err != nil {
+			t.Error(err)
+		}
+		stub := &chaincodeStubMock{callerCert: tcertder, metadata: attributeMetadata}*/
+	stub := &chaincodeStubMock{callerCert: tcertder}
+	handler, err := NewAttributesHandlerImpl(stub)
+	if err != nil {
+		t.Error(err)
+	}
+
+	isOk, err := handler.VerifyAttributes(&Attribute{Name: "position", Value: []byte("Software Engineer")}, &Attribute{Name: "position", Value: []byte("18")})
+	if err != nil {
+		t.Error(err)
+	}
+
+	if isOk {
+		t.Fatal("Attribute position=18 should have failed")
+	}
+}
+
+// TestVerifyAttributes_InvalidHeader corrupts the byte holding the attributes
+// header extension of the clear TCert and expects verification to error out.
+func TestVerifyAttributes_InvalidHeader(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		tcert, prek0, err := loadTCertAndPreK0() */
+	tcert, err := loadTCertClear()
+	if err != nil {
+		t.Error(err)
+	}
+
+	//Change header extensions
+	// NOTE(review): offset 583 is the header extension position within this
+	// specific tcert_clear.dump fixture — it is fixture-dependent.
+	tcert.Raw[583] = tcert.Raw[583] + 124
+
+	tcertder := tcert.Raw
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		metadata := []byte{32,64}
+		attributeMetadata, err := attributes.CreateAttributesMetadata(tcertder, metadata, prek0, attributeNames)
+		if err != nil {
+			t.Error(err)
+		}
+		stub := &chaincodeStubMock{callerCert: tcertder, metadata: attributeMetadata}*/
+	stub := &chaincodeStubMock{callerCert: tcertder}
+	handler, err := NewAttributesHandlerImpl(stub)
+	if err != nil {
+		t.Error(err)
+	}
+
+	_, err = handler.VerifyAttributes(&Attribute{Name: "position", Value: []byte("Software Engineer")})
+	if err == nil {
+		t.Fatal("Error can't be nil.")
+	}
+}
+
+// TestVerifyAttributes_InvalidAttributeValue corrupts the byte holding the
+// attribute value in the clear TCert and expects GetValue to fail.
+func TestVerifyAttributes_InvalidAttributeValue(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		tcert, prek0, err := loadTCertAndPreK0() */
+	tcert, err := loadTCertClear()
+	if err != nil {
+		t.Error(err)
+	}
+
+	//Change header extensions
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field. 337 is the offset in encrypted tcert.
+		tcert.Raw[371] = tcert.Raw[371] + 124*/
+	// NOTE(review): offset 558 is fixture-dependent (tcert_clear.dump).
+	tcert.Raw[558] = tcert.Raw[558] + 124
+
+	tcertder := tcert.Raw
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		metadata := []byte{32,64}
+		attributeMetadata, err := attributes.CreateAttributesMetadata(tcertder, metadata, prek0, attributeNames)
+		if err != nil {
+			t.Error(err)
+		}
+		stub := &chaincodeStubMock{callerCert: tcertder, metadata: attributeMetadata} */
+	stub := &chaincodeStubMock{callerCert: tcertder}
+	handler, err := NewAttributesHandlerImpl(stub)
+	if err != nil {
+		t.Fatalf("Error creating attribute handlder %v", err)
+	}
+
+	v, err := handler.GetValue("position")
+	if err == nil {
+		t.Fatal("Error can't be nil." + string(v))
+	}
+}
+
+// TestVerifyAttributes_Null checks that a nil expected value does not match a
+// present attribute (no error, but verification result is false).
+func TestVerifyAttributes_Null(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		tcert, prek0, err := loadTCertAndPreK0() */
+	tcert, err := loadTCertClear()
+	if err != nil {
+		t.Error(err)
+	}
+	tcertder := tcert.Raw
+
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		metadata := []byte{32,64}
+		attributeMetadata, err := attributes.CreateAttributesMetadata(tcertder, metadata, prek0, attributeNames)
+		if err != nil {
+			t.Error(err)
+		}
+		stub := &chaincodeStubMock{callerCert: tcertder, metadata: attributeMetadata}*/
+	stub := &chaincodeStubMock{callerCert: tcertder}
+	handler, err := NewAttributesHandlerImpl(stub)
+	if err != nil {
+		t.Error(err)
+	}
+
+	isOk, err := handler.VerifyAttribute("position", nil)
+	if err != nil {
+		t.Error(err)
+	}
+
+	if isOk {
+		t.Fatal("Attribute null is ok.")
+	}
+}
+
+// TestGetValue reads an attribute value from the TCert, then reads it a second
+// time to exercise the handler's cache path.
+func TestGetValue(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		tcert, prek0, err := loadTCertAndPreK0() */
+	tcert, err := loadTCertClear()
+	if err != nil {
+		t.Error(err)
+	}
+	tcertder := tcert.Raw
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		metadata := []byte{32, 64}
+
+		attributeMetadata, err := attributes.CreateAttributesMetadata(tcertder, metadata, prek0, attributeNames)
+		if err != nil {
+			t.Error(err)
+		}
+		stub := &chaincodeStubMock{callerCert: tcertder, metadata: attributeMetadata}*/
+	stub := &chaincodeStubMock{callerCert: tcertder}
+	handler, err := NewAttributesHandlerImpl(stub)
+	if err != nil {
+		t.Error(err)
+	}
+
+	value, err := handler.GetValue("position")
+	if err != nil {
+		t.Error(err)
+	}
+
+	// bytes.Equal replaces bytes.Compare(...) != 0 (golint/staticcheck idiom).
+	if !bytes.Equal(value, []byte("Software Engineer")) {
+		t.Fatalf("Value expected was [%v] and result was [%v].", []byte("Software Engineer"), value)
+	}
+
+	//Second time read from cache.
+	value, err = handler.GetValue("position")
+	if err != nil {
+		t.Error(err)
+	}
+
+	if !bytes.Equal(value, []byte("Software Engineer")) {
+		t.Fatalf("Value expected was [%v] and result was [%v].", []byte("Software Engineer"), value)
+	}
+}
+
+// TestGetValue_Clear exercises the package-level GetValueFrom helper against a
+// cleartext TCert, reading the same attribute twice.
+func TestGetValue_Clear(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+
+	tcert, err := loadTCertClear()
+	if err != nil {
+		t.Error(err)
+	}
+	tcertder := tcert.Raw
+	value, err := GetValueFrom("position", tcertder)
+	if err != nil {
+		t.Error(err)
+	}
+
+	// bytes.Equal replaces bytes.Compare(...) != 0 (golint/staticcheck idiom).
+	if !bytes.Equal(value, []byte("Software Engineer")) {
+		t.Fatalf("Value expected was [%v] and result was [%v].", []byte("Software Engineer"), value)
+	}
+
+	//Second time read from cache.
+	value, err = GetValueFrom("position", tcertder)
+	if err != nil {
+		t.Error(err)
+	}
+
+	if !bytes.Equal(value, []byte("Software Engineer")) {
+		t.Fatalf("Value expected was [%v] and result was [%v].", []byte("Software Engineer"), value)
+	}
+}
+
+// TestGetValue_BadHeaderTCert checks that GetValueFrom fails for a TCert whose
+// attributes header (tcert_bad.dump fixture) is malformed.
+func TestGetValue_BadHeaderTCert(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+
+	tcert, err := loadTCertFromFile("./test_resources/tcert_bad.dump")
+	if err != nil {
+		t.Error(err)
+	}
+	tcertder := tcert.Raw
+	_, err = GetValueFrom("position", tcertder)
+	if err == nil {
+		t.Fatal("Test should be fail due TCert has an invalid header.")
+	}
+}
+
+// TestGetValue_Clear_NullTCert checks that GetValueFrom fails when no
+// certificate is supplied.
+func TestGetValue_Clear_NullTCert(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+	_, err := GetValueFrom("position", nil)
+	if err == nil {
+		// Original called t.Error(err) here, which logged a nil error and gave
+		// no diagnostic; fail with an explicit message instead.
+		t.Fatal("Error shouldn't be nil when the TCert is nil")
+	}
+}
+
+// TestGetValue_InvalidAttribute checks that requesting attributes absent from
+// the TCert returns an error, including after forcing a nil decryption key.
+func TestGetValue_InvalidAttribute(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		tcert, prek0, err := loadTCertAndPreK0() */
+	tcert, err := loadTCertClear()
+	if err != nil {
+		t.Error(err)
+	}
+	tcertder := tcert.Raw
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		metadata := []byte{32, 64}
+		attributeMetadata, err := attributes.CreateAttributesMetadata(tcertder, metadata, prek0, attributeNames)
+		if err != nil {
+			t.Error(err)
+		}
+		stub := &chaincodeStubMock{callerCert: tcertder, metadata: attributeMetadata}*/
+	stub := &chaincodeStubMock{callerCert: tcertder}
+	handler, err := NewAttributesHandlerImpl(stub)
+	if err != nil {
+		t.Error(err)
+	}
+
+	_, err = handler.GetValue("age")
+	if err == nil {
+		// Original called t.Error(err) with a nil error; report a real message.
+		t.Error("Error shouldn't be nil: attribute 'age' is not in the TCert")
+	}
+
+	//Force invalid key
+	handler.keys["position"] = nil
+	// NOTE(review): the lookup uses "positions" while the key forced to nil is
+	// "position" — confirm which attribute this branch intends to exercise.
+	_, err = handler.GetValue("positions")
+	if err == nil {
+		t.Error("Error shouldn't be nil: attribute 'positions' is not in the TCert")
+	}
+}
+
+// TestGetValue_Clear_InvalidAttribute checks that GetValue returns a nil value
+// and an error for an attribute that is not present in the clear TCert.
+func TestGetValue_Clear_InvalidAttribute(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+
+	tcert, err := loadTCertClear()
+	if err != nil {
+		t.Error(err)
+	}
+	tcertder := tcert.Raw
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		metadata := []byte{32, 64}
+		attributeMetadata, err := attributes.CreateAttributesMetadata(tcertder, metadata, prek0, attributeNames)
+		if err != nil {
+			t.Error(err)
+		}
+		stub := &chaincodeStubMock{callerCert: tcertder, metadata: attributeMetadata}*/
+	stub := &chaincodeStubMock{callerCert: tcertder}
+	handler, err := NewAttributesHandlerImpl(stub)
+	if err != nil {
+		t.Error(err)
+	}
+
+	value, err := handler.GetValue("age")
+	if value != nil || err == nil {
+		t.Fatalf("Test should fail [%v] \n", string(value))
+	}
+}
+
+// TestGetValue_InvalidAttribute_ValidAttribute checks that a failed lookup of a
+// missing attribute does not poison a subsequent lookup of a valid one.
+func TestGetValue_InvalidAttribute_ValidAttribute(t *testing.T) {
+	primitives.SetSecurityLevel("SHA3", 256)
+
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		tcert, prek0, err := loadTCertAndPreK0() */
+	tcert, err := loadTCertClear()
+	if err != nil {
+		t.Error(err)
+	}
+	tcertder := tcert.Raw
+	/*
+		TODO: ##attributes-keys-pending This code have be redefined to avoid use of metadata field.
+		metadata := []byte{32, 64}
+		attributeMetadata, err := attributes.CreateAttributesMetadata(tcertder, metadata, prek0, attributeNames)
+		if err != nil {
+			t.Error(err)
+		}
+		stub := &chaincodeStubMock{callerCert: tcertder, metadata: attributeMetadata}*/
+	stub := &chaincodeStubMock{callerCert: tcertder}
+	handler, err := NewAttributesHandlerImpl(stub)
+	if err != nil {
+		t.Error(err)
+	}
+
+	_, err = handler.GetValue("age")
+	if err == nil {
+		// Original called t.Error(err) with a nil error; report a real message.
+		t.Error("Error shouldn't be nil: attribute 'age' is not in the TCert")
+	}
+
+	//Second time read a valid attribute from the TCert.
+	value, err := handler.GetValue("position")
+	if err != nil {
+		t.Error(err)
+	}
+
+	// bytes.Equal replaces bytes.Compare(...) != 0 (golint/staticcheck idiom).
+	if !bytes.Equal(value, []byte("Software Engineer")) {
+		t.Fatalf("Value expected was [%v] and result was [%v].", []byte("Software Engineer"), value)
+	}
+}
+
+// loadTCertFromFile reads a PEM-encoded certificate from filepath and parses
+// it into an x509.Certificate.
+func loadTCertFromFile(filepath string) (*x509.Certificate, error) {
+	tcertRaw, err := ioutil.ReadFile(filepath)
+	if err != nil {
+		return nil, err
+	}
+
+	// pem.Decode returns a nil block when the input holds no PEM data; guard
+	// against it to avoid a nil-pointer dereference on tcertDecoded.Bytes.
+	tcertDecoded, _ := pem.Decode(tcertRaw)
+	if tcertDecoded == nil {
+		return nil, errors.New("Failed decoding PEM block from file " + filepath)
+	}
+
+	tcert, err := x509.ParseCertificate(tcertDecoded.Bytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return tcert, nil
+}
+
+// loadTCertAndPreK0 loads the test TCert together with pre-key 0 from the
+// test_resources fixtures.
+func loadTCertAndPreK0() (*x509.Certificate, []byte, error) {
+	preKey0, err := ioutil.ReadFile("./test_resources/prek0.dump")
+	if err != nil {
+		return nil, nil, err
+	}
+	// (A second identical err check that followed here was dead code and has
+	// been removed.)
+
+	tcert, err := loadTCertFromFile("./test_resources/tcert.dump")
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return tcert, preKey0, nil
+}
+
+// loadTCertClear loads the fixture TCert whose attributes are stored in clear.
+func loadTCertClear() (*x509.Certificate, error) {
+	return loadTCertFromFile("./test_resources/tcert_clear.dump")
+}
diff --git a/core/chaincode/shim/crypto/attr/test_resources/prek0.dump b/core/chaincode/shim/crypto/attr/test_resources/prek0.dump
new file mode 100644
index 00000000000..442dfea6ca2
--- /dev/null
+++ b/core/chaincode/shim/crypto/attr/test_resources/prek0.dump
@@ -0,0 +1 @@
+ýA>ø0`ñËÕ›=)»á*¨®g&V'ù aÅàkâJ
\ No newline at end of file
diff --git a/core/chaincode/shim/crypto/attr/test_resources/tcert.dump b/core/chaincode/shim/crypto/attr/test_resources/tcert.dump
new file mode 100644
index 00000000000..95c9862125f
--- /dev/null
+++ b/core/chaincode/shim/crypto/attr/test_resources/tcert.dump
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIIC2TCCAn+gAwIBAgIQBq019NJETm+JnM9pUFY5fDAKBggqhkjOPQQDAzAxMQsw
+CQYDVQQGEwJVUzEUMBIGA1UEChMLSHlwZXJsZWRnZXIxDDAKBgNVBAMTA3RjYTAe
+Fw0xNjA1MjYxODU4MjBaFw0xNjA4MjQxODU4MjBaMDMxCzAJBgNVBAYTAlVTMRQw
+EgYDVQQKEwtIeXBlcmxlZGdlcjEOMAwGA1UEAxMFZGllZ28wWTATBgcqhkjOPQIB
+BggqhkjOPQMBBwNCAARv37sk17/Yq6Ata16fj1CacV3uvYzCwgWx2hearwfEBbEh
++lfmQXYUEu7pcaEaPh+9dNVEeqyHWGqFu2mo5tufo4IBdTCCAXEwDgYDVR0PAQH/
+BAQDAgeAMAwGA1UdEwEB/wQCMAAwDQYDVR0OBAYEBAECAwQwDwYDVR0jBAgwBoAE
+AQIDBDBKBgYqAwQFBgoEQLb0qP6OU62xQQewx5PhpJu9cnLp54+aOpSptE78GNyd
+vh7xNtSHD1GTouEK2RFx1YwJZHAM7OS48JTDZoPr1L4wTQYGKgMEBQYHAQH/BEDv
+kl2JEP0f8OQRch84Hd3o3oGJbOrUBA5eYP0TaQLzpbGop4Z3Uun2Iyllfhixr+Gq
+2Xv1vuSDsbNDpObuQthJMEoGBioDBAUGCARALLtd0x7G/yc2WSSo6ag0nntWayud
+kaIW7NOJiWGJaOFtP+fufzIUPzYvBAuQIk3nYeOLBH/948ZyKsJQWW/LtzBKBgYq
+AwQFBgkEQAXgSabQSC5xHa/YXQi7nlStN81eiG/VhGfTeLvkHXPMDcULAGyKHtax
+l2IaAap9QetXi6pkN78lO048IhFTFCswCgYIKoZIzj0EAwMDSAAwRQIhAMSY4g4E
+hWh7Ey4sOpPYfJwfM82nZHboLEUzrWFwuZ+KAiBf2V0OoXPt2I2MaV1+2OQIaHcJ
+BF8oB65Ox67VENMNUg==
+-----END CERTIFICATE-----
diff --git a/core/chaincode/shim/crypto/attr/test_resources/tcert_bad.dump b/core/chaincode/shim/crypto/attr/test_resources/tcert_bad.dump
new file mode 100644
index 00000000000..ac61d3bdfa2
--- /dev/null
+++ b/core/chaincode/shim/crypto/attr/test_resources/tcert_bad.dump
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIICoDCCAkagAwIBAgIRALdvx2997k0el4M6q7FoSYswCgYIKoZIzj0EAwMwMTEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoTC0h5cGVybGVkZ2VyMQwwCgYDVQQDEwN0Y2Ew
+HhcNMTYwNjExMDI1NzIzWhcNMTYwOTA5MDI1NzIzWjAzMQswCQYDVQQGEwJVUzEU
+MBIGA1UEChMLSHlwZXJsZWRnZXIxDjAMBgNVBAMTBWRpZWdvMFkwEwYHKoZIzj0C
+AQYIKoZIzj0DAQcDQgAETkdTO8tIQhNbm4k01RUIRV+vfqV7cCiEe/he209CoL9w
+ykpkNpLWDvuuaPngwiQ3XoBpR5KkV11WLEkm6YgO/KOCATswggE3MA4GA1UdDwEB
+/wQEAwIHgDAMBgNVHRMBAf8EAjAAMA0GA1UdDgQGBAQBAgMEMA8GA1UdIwQIMAaA
+BAECAwQwGwYGKgMEBQYKBBFTb2Z0d2FyZSBFbmdpbmVlcjASBgYqAwQFBgsECEFD
+b21wYW55ME0GBioDBAUGBwEB/wRA3Fw686SYs3vBtSGe+NhpnLjJMXNDgTrYO8qi
+Q4gFvNvPZvrU+QDgBnzS/YRUSP8FS/d8NA3ur5QWyafuFeRatzBKBgYqAwQFBggE
+QIb+dQuwRhrYXRvJmpVDcZMacvQtPJYRX/rI0SwC9mcylgUhypfs7bbXleskX4cW
+Oi/WC2ntOW9MN4g2usDrCOMwKwYGKgMEBQYJBCEwMEhFQURwb3NpdGlvbi0+MTAw
+I2NvbXBhbnktPjEwMCMwCgYIKoZIzj0EAwMDSAAwRQIhANtfRCq+wkaSDfhpASA6
+0oyUcMQWSyVaOvMfWnmRJAJAAiBcnczAputMeGMWbHdpI2aVR7yn4o+MqRVzEBy3
+odOIQw==
+-----END CERTIFICATE-----
diff --git a/core/chaincode/shim/crypto/attr/test_resources/tcert_clear.dump b/core/chaincode/shim/crypto/attr/test_resources/tcert_clear.dump
new file mode 100644
index 00000000000..2c39b6df4a4
--- /dev/null
+++ b/core/chaincode/shim/crypto/attr/test_resources/tcert_clear.dump
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE-----
+MIICnDCCAkGgAwIBAgIQDeV68dFWTbaf9Z5CElzSPjAKBggqhkjOPQQDAzAxMQsw
+CQYDVQQGEwJVUzEUMBIGA1UEChMLSHlwZXJsZWRnZXIxDDAKBgNVBAMTA3RjYTAe
+Fw0xNjA2MTEwMTIwNDlaFw0xNjA5MDkwMTIwNDlaMDMxCzAJBgNVBAYTAlVTMRQw
+EgYDVQQKEwtIeXBlcmxlZGdlcjEOMAwGA1UEAxMFZGllZ28wWTATBgcqhkjOPQIB
+BggqhkjOPQMBBwNCAAT8PV1TuE63bFhpDpStyT1oi1sQBg1mcqgAZ8VKiHdyqE/j
+tihuHuMJv8fEDLB56GwfTZNGAO0NJOykou3QVLBTo4IBNzCCATMwDgYDVR0PAQH/
+BAQDAgeAMAwGA1UdEwEB/wQCMAAwDQYDVR0OBAYEBAECAwQwDwYDVR0jBAgwBoAE
+AQIDBDAbBgYqAwQFBgoEEVNvZnR3YXJlIEVuZ2luZWVyMBIGBioDBAUGCwQIQUNv
+bXBhbnkwTQYGKgMEBQYHAQH/BECHkB85vxvL+vE5aGVvbclE6A8hthYo15KSARz/
+ZF5wc2JJfHIMs+M4MV00kExRitvVsOXVxYqMGHZcsyzT56jaMEoGBioDBAUGCARA
+9DXfVKvsohk/E4J1UUxeELM1EMhCeTpkOFPs0mLUytGyQlL9Wuj7SAKHF3GP3Pkj
+AX9wZaw26RrNYRTIa0azwTAnBgYqAwQFBgkEHTAwSEVBRHBvc2l0aW9uLT4xI2Nv
+bXBhbnktPjIjMAoGCCqGSM49BAMDA0kAMEYCIQCw/3c2dA3KEU9jfGkPbwYxX1UK
+BuQzK2hmoD+CUEGtpgIhAPW+4/+IL+6A367zTuqpI//6e75pSdj43Wn2rKuhSbF9
+-----END CERTIFICATE-----
diff --git a/core/chaincode/shim/crypto/crypto.go b/core/chaincode/shim/crypto/crypto.go
new file mode 100644
index 00000000000..4fb850bde06
--- /dev/null
+++ b/core/chaincode/shim/crypto/crypto.go
@@ -0,0 +1,24 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+// SignatureVerifier verifies signatures
+type SignatureVerifier interface {
+
+	// Verify verifies signature sig against verification key vk and message msg.
+	// It reports whether the signature is valid; a non-nil error indicates the
+	// verification could not be carried out (it is not a signature mismatch).
+	Verify(vk, sig, msg []byte) (bool, error)
+}
diff --git a/core/chaincode/shim/crypto/ecdsa/ecdsa.go b/core/chaincode/shim/crypto/ecdsa/ecdsa.go
new file mode 100644
index 00000000000..a862867f124
--- /dev/null
+++ b/core/chaincode/shim/crypto/ecdsa/ecdsa.go
@@ -0,0 +1,67 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ecdsa
+
+import (
+	"crypto/ecdsa"
+	"encoding/asn1"
+	"fmt"
+	"math/big"
+
+	"github.com/hyperledger/fabric/core/chaincode/shim/crypto"
+)
+
+// x509ECDSASignatureVerifierImpl verifies ECDSA signatures where the
+// verification key is supplied as a DER-encoded x509 certificate.
+type x509ECDSASignatureVerifierImpl struct {
+}
+
+// ECDSASignature represents an ECDSA signature
+type ECDSASignature struct {
+	// R, S are the two components of an ECDSA signature, as unmarshalled from
+	// its ASN.1 encoding.
+	R, S *big.Int
+}
+
+// Verify interprets certificate as a DER-encoded x509 certificate carrying an
+// ECDSA public key and checks signature over message against it.
+func (sv *x509ECDSASignatureVerifierImpl) Verify(certificate, signature, message []byte) (bool, error) {
+	// Interpret vk as an x509 certificate
+	cert, err := derToX509Certificate(certificate)
+	if err != nil {
+		return false, err
+	}
+
+	// TODO: verify certificate
+
+	// A checked type assertion avoids a panic when the certificate carries a
+	// non-ECDSA key (e.g. RSA); the original unchecked assertion would panic.
+	vk, ok := cert.PublicKey.(*ecdsa.PublicKey)
+	if !ok {
+		return false, fmt.Errorf("certificate does not carry an ECDSA public key [%T]", cert.PublicKey)
+	}
+
+	return sv.verifyImpl(vk, signature, message)
+}
+
+// verifyImpl unmarshals signature as an ASN.1-encoded (R, S) pair, hashes
+// message with a digest sized by the key's curve bitsize, and runs ECDSA
+// verification against vk.
+func (sv *x509ECDSASignatureVerifierImpl) verifyImpl(vk *ecdsa.PublicKey, signature, message []byte) (bool, error) {
+	ecdsaSignature := new(ECDSASignature)
+	_, err := asn1.Unmarshal(signature, ecdsaSignature)
+	if err != nil {
+		return false, err
+	}
+
+	// The digest length is tied to the curve size (e.g. P-256 -> 256-bit hash).
+	h, err := computeHash(message, vk.Params().BitSize)
+	if err != nil {
+		return false, err
+	}
+
+	return ecdsa.Verify(vk, h, ecdsaSignature.R, ecdsaSignature.S), nil
+}
+
+// NewX509ECDSASignatureVerifier returns a SignatureVerifier that treats the
+// verification key as a DER-encoded x509 certificate holding an ECDSA public
+// key. (Doc comment added: golint requires exported functions to be documented.)
+func NewX509ECDSASignatureVerifier() crypto.SignatureVerifier {
+	return &x509ECDSASignatureVerifierImpl{}
+}
diff --git a/core/chaincode/shim/crypto/ecdsa/ecdsa_test.go b/core/chaincode/shim/crypto/ecdsa/ecdsa_test.go
new file mode 100644
index 00000000000..008367ea1f6
--- /dev/null
+++ b/core/chaincode/shim/crypto/ecdsa/ecdsa_test.go
@@ -0,0 +1,191 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ecdsa
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+)
+
+// runSignatureVerifierTest signs a message with a fresh self-signed cert/key
+// pair at the given security level and checks that the x509 ECDSA verifier
+// accepts the signature. It factors out the body shared verbatim by the six
+// TestSignatureVerifier* tests below (consistency/decomposition; behavior and
+// test names are unchanged).
+func runSignatureVerifierTest(t *testing.T, hashFamily string, securityLevel int) {
+	// Create a signature
+	primitives.SetSecurityLevel(hashFamily, securityLevel)
+
+	cert, key, err := primitives.NewSelfSignedCert()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	message := []byte("Hello World!")
+	signature, err := primitives.ECDSASign(key, message)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Instantiate a new SignatureVerifier
+	sv := NewX509ECDSASignatureVerifier()
+
+	// Verify the signature
+	ok, err := sv.Verify(cert, signature, message)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !ok {
+		t.Fatal("Signature does not verify")
+	}
+}
+
+func TestSignatureVerifier(t *testing.T) {
+	runSignatureVerifierTest(t, "SHA3", 256)
+}
+
+func TestSignatureVerifierSHA2(t *testing.T) {
+	runSignatureVerifierTest(t, "SHA2", 256)
+}
+
+func TestSignatureVerifierSHA2_384(t *testing.T) {
+	runSignatureVerifierTest(t, "SHA2", 384)
+}
+
+func TestSignatureVerifierSHA3_384(t *testing.T) {
+	runSignatureVerifierTest(t, "SHA3", 384)
+}
+
+func TestSignatureVerifierSHA2_512(t *testing.T) {
+	runSignatureVerifierTest(t, "SHA2", 512)
+}
+
+func TestSignatureVerifierSHA3_512(t *testing.T) {
+	runSignatureVerifierTest(t, "SHA3", 512)
+}
diff --git a/core/chaincode/shim/crypto/ecdsa/hash.go b/core/chaincode/shim/crypto/ecdsa/hash.go
new file mode 100644
index 00000000000..6317cad7c5c
--- /dev/null
+++ b/core/chaincode/shim/crypto/ecdsa/hash.go
@@ -0,0 +1,82 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ecdsa
+
+import (
+ "crypto/sha256"
+ "crypto/sha512"
+ "fmt"
+ "hash"
+
+ "golang.org/x/crypto/sha3"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+)
+
+func getHashSHA2(bitsize int) (hash.Hash, error) {
+ switch bitsize {
+ case 224:
+ return sha256.New224(), nil
+ case 256:
+ return sha256.New(), nil
+ case 384:
+ return sha512.New384(), nil
+ case 512:
+ return sha512.New(), nil
+ case 521:
+ return sha512.New(), nil
+ default:
+ return nil, fmt.Errorf("Invalid bitsize. It was [%d]. Expected [224, 256, 384, 512, 521]", bitsize)
+ }
+}
+
+func getHashSHA3(bitsize int) (hash.Hash, error) {
+ switch bitsize {
+ case 224:
+ return sha3.New224(), nil
+ case 256:
+ return sha3.New256(), nil
+ case 384:
+ return sha3.New384(), nil
+ case 512:
+ return sha3.New512(), nil
+ case 521:
+ return sha3.New512(), nil
+ default:
+ return nil, fmt.Errorf("Invalid bitsize. It was [%d]. Expected [224, 256, 384, 512, 521]", bitsize)
+ }
+}
+
+func computeHash(msg []byte, bitsize int) ([]byte, error) {
+ var hash hash.Hash
+ var err error
+ switch primitives.GetHashAlgorithm() {
+ case "SHA2":
+ hash, err = getHashSHA2(bitsize)
+ case "SHA3":
+ hash, err = getHashSHA3(bitsize)
+ default:
+ return nil, fmt.Errorf("Invalid hash algorithm " + primitives.GetHashAlgorithm())
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ hash.Write(msg)
+ return hash.Sum(nil), nil
+}
diff --git a/core/chaincode/shim/crypto/ecdsa/x509.go b/core/chaincode/shim/crypto/ecdsa/x509.go
new file mode 100644
index 00000000000..d65f0cc6fee
--- /dev/null
+++ b/core/chaincode/shim/crypto/ecdsa/x509.go
@@ -0,0 +1,25 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ecdsa
+
+import (
+ "crypto/x509"
+)
+
+func derToX509Certificate(asn1Data []byte) (*x509.Certificate, error) {
+ return x509.ParseCertificate(asn1Data)
+}
diff --git a/core/chaincode/shim/handler.go b/core/chaincode/shim/handler.go
new file mode 100644
index 00000000000..f6975c19849
--- /dev/null
+++ b/core/chaincode/shim/handler.go
@@ -0,0 +1,906 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package shim
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ pb "github.com/hyperledger/fabric/protos"
+ "github.com/looplab/fsm"
+)
+
+// PeerChaincodeStream interface for stream between Peer and chaincode instance.
+type PeerChaincodeStream interface {
+ Send(*pb.ChaincodeMessage) error
+ Recv() (*pb.ChaincodeMessage, error)
+ CloseSend() error
+}
+
+type nextStateInfo struct {
+ msg *pb.ChaincodeMessage
+ sendToCC bool
+}
+
+func (handler *Handler) triggerNextState(msg *pb.ChaincodeMessage, send bool) {
+ handler.nextState <- &nextStateInfo{msg, send}
+}
+
+// Handler handler implementation for shim side of chaincode.
+type Handler struct {
+ sync.RWMutex
+	//shim to peer grpc serializer. Used only in serialSend
+ serialLock sync.Mutex
+ To string
+ ChatStream PeerChaincodeStream
+ FSM *fsm.FSM
+ cc Chaincode
+ // Multiple queries (and one transaction) with different Uuids can be executing in parallel for this chaincode
+ // responseChannel is the channel on which responses are communicated by the shim to the chaincodeStub.
+ responseChannel map[string]chan pb.ChaincodeMessage
+ // Track which UUIDs are transactions and which are queries, to decide whether get/put state and invoke chaincode are allowed.
+ isTransaction map[string]bool
+ nextState chan *nextStateInfo
+}
+
+func shortuuid(uuid string) string {
+ if len(uuid) < 8 {
+ return uuid
+ }
+ return uuid[0:8]
+}
+
+func (handler *Handler) serialSend(msg *pb.ChaincodeMessage) error {
+ handler.serialLock.Lock()
+ defer handler.serialLock.Unlock()
+ if err := handler.ChatStream.Send(msg); err != nil {
+ chaincodeLogger.Errorf("[%s]Error sending %s: %s", shortuuid(msg.Uuid), msg.Type.String(), err)
+ return fmt.Errorf("Error sending %s: %s", msg.Type.String(), err)
+ }
+ return nil
+}
+
+func (handler *Handler) createChannel(uuid string) (chan pb.ChaincodeMessage, error) {
+ handler.Lock()
+ defer handler.Unlock()
+ if handler.responseChannel == nil {
+ return nil, fmt.Errorf("[%s]Cannot create response channel", shortuuid(uuid))
+ }
+ if handler.responseChannel[uuid] != nil {
+ return nil, fmt.Errorf("[%s]Channel exists", shortuuid(uuid))
+ }
+ c := make(chan pb.ChaincodeMessage)
+ handler.responseChannel[uuid] = c
+ return c, nil
+}
+
+func (handler *Handler) sendChannel(msg *pb.ChaincodeMessage) error {
+ handler.Lock()
+ defer handler.Unlock()
+ if handler.responseChannel == nil {
+ return fmt.Errorf("[%s]Cannot send message response channel", shortuuid(msg.Uuid))
+ }
+ if handler.responseChannel[msg.Uuid] == nil {
+ return fmt.Errorf("[%s]sendChannel does not exist", shortuuid(msg.Uuid))
+ }
+
+ chaincodeLogger.Debugf("[%s]before send", shortuuid(msg.Uuid))
+ handler.responseChannel[msg.Uuid] <- *msg
+ chaincodeLogger.Debugf("[%s]after send", shortuuid(msg.Uuid))
+
+ return nil
+}
+
+func (handler *Handler) receiveChannel(c chan pb.ChaincodeMessage) (pb.ChaincodeMessage, bool) {
+ msg, val := <-c
+ return msg, val
+}
+
+func (handler *Handler) deleteChannel(uuid string) {
+ handler.Lock()
+ defer handler.Unlock()
+ if handler.responseChannel != nil {
+ delete(handler.responseChannel, uuid)
+ }
+}
+
+// markIsTransaction marks a UUID as a transaction or a query; true = transaction, false = query
+func (handler *Handler) markIsTransaction(uuid string, isTrans bool) bool {
+ if handler.isTransaction == nil {
+ return false
+ }
+ handler.Lock()
+ defer handler.Unlock()
+ handler.isTransaction[uuid] = isTrans
+ return true
+}
+
+func (handler *Handler) deleteIsTransaction(uuid string) {
+ handler.Lock()
+ if handler.isTransaction != nil {
+ delete(handler.isTransaction, uuid)
+ }
+ handler.Unlock()
+}
+
+// NewChaincodeHandler returns a new instance of the shim side handler.
+func newChaincodeHandler(peerChatStream PeerChaincodeStream, chaincode Chaincode) *Handler {
+ v := &Handler{
+ ChatStream: peerChatStream,
+ cc: chaincode,
+ }
+ v.responseChannel = make(map[string]chan pb.ChaincodeMessage)
+ v.isTransaction = make(map[string]bool)
+ v.nextState = make(chan *nextStateInfo)
+
+ // Create the shim side FSM
+ v.FSM = fsm.NewFSM(
+ "created",
+ fsm.Events{
+ {Name: pb.ChaincodeMessage_REGISTERED.String(), Src: []string{"created"}, Dst: "established"},
+ {Name: pb.ChaincodeMessage_INIT.String(), Src: []string{"established"}, Dst: "init"},
+ {Name: pb.ChaincodeMessage_READY.String(), Src: []string{"established"}, Dst: "ready"},
+ {Name: pb.ChaincodeMessage_ERROR.String(), Src: []string{"init"}, Dst: "established"},
+ {Name: pb.ChaincodeMessage_RESPONSE.String(), Src: []string{"init"}, Dst: "init"},
+ {Name: pb.ChaincodeMessage_COMPLETED.String(), Src: []string{"init"}, Dst: "ready"},
+ {Name: pb.ChaincodeMessage_TRANSACTION.String(), Src: []string{"ready"}, Dst: "transaction"},
+ {Name: pb.ChaincodeMessage_COMPLETED.String(), Src: []string{"transaction"}, Dst: "ready"},
+ {Name: pb.ChaincodeMessage_ERROR.String(), Src: []string{"transaction"}, Dst: "ready"},
+ {Name: pb.ChaincodeMessage_RESPONSE.String(), Src: []string{"transaction"}, Dst: "transaction"},
+ {Name: pb.ChaincodeMessage_QUERY.String(), Src: []string{"transaction"}, Dst: "transaction"},
+ {Name: pb.ChaincodeMessage_QUERY.String(), Src: []string{"ready"}, Dst: "ready"},
+ {Name: pb.ChaincodeMessage_RESPONSE.String(), Src: []string{"ready"}, Dst: "ready"},
+ },
+ fsm.Callbacks{
+ "before_" + pb.ChaincodeMessage_REGISTERED.String(): func(e *fsm.Event) { v.beforeRegistered(e) },
+ //"after_" + pb.ChaincodeMessage_INIT.String(): func(e *fsm.Event) { v.beforeInit(e) },
+ //"after_" + pb.ChaincodeMessage_TRANSACTION.String(): func(e *fsm.Event) { v.beforeTransaction(e) },
+ "after_" + pb.ChaincodeMessage_RESPONSE.String(): func(e *fsm.Event) { v.afterResponse(e) },
+ "after_" + pb.ChaincodeMessage_ERROR.String(): func(e *fsm.Event) { v.afterError(e) },
+ "enter_init": func(e *fsm.Event) { v.enterInitState(e) },
+ "enter_transaction": func(e *fsm.Event) { v.enterTransactionState(e) },
+ //"enter_ready": func(e *fsm.Event) { v.enterReadyState(e) },
+ "before_" + pb.ChaincodeMessage_QUERY.String(): func(e *fsm.Event) { v.beforeQuery(e) }, //only checks for QUERY
+ },
+ )
+ return v
+}
+
+// beforeRegistered is called to handle the REGISTERED message.
+func (handler *Handler) beforeRegistered(e *fsm.Event) {
+ if _, ok := e.Args[0].(*pb.ChaincodeMessage); !ok {
+ e.Cancel(fmt.Errorf("Received unexpected message type"))
+ return
+ }
+ chaincodeLogger.Debugf("Received %s, ready for invocations", pb.ChaincodeMessage_REGISTERED)
+}
+
+// handleInit handles request to initialize chaincode.
+func (handler *Handler) handleInit(msg *pb.ChaincodeMessage) {
+ // The defer followed by triggering a go routine dance is needed to ensure that the previous state transition
+ // is completed before the next one is triggered. The previous state transition is deemed complete only when
+ // the beforeInit function is exited. Interesting bug fix!!
+ go func() {
+ var nextStateMsg *pb.ChaincodeMessage
+
+ send := true
+
+ defer func() {
+ handler.triggerNextState(nextStateMsg, send)
+ }()
+
+ // Get the function and args from Payload
+ input := &pb.ChaincodeInput{}
+ unmarshalErr := proto.Unmarshal(msg.Payload, input)
+ if unmarshalErr != nil {
+ payload := []byte(unmarshalErr.Error())
+ // Send ERROR message to chaincode support and change state
+ chaincodeLogger.Debugf("[%s]Incorrect payload format. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_ERROR)
+ nextStateMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+ return
+ }
+
+ // Mark as a transaction (allow put/del state)
+ handler.markIsTransaction(msg.Uuid, true)
+
+ // Call chaincode's Run
+ // Create the ChaincodeStub which the chaincode can use to callback
+ stub := new(ChaincodeStub)
+ stub.init(msg.Uuid, msg.SecurityContext)
+ res, err := handler.cc.Init(stub, input.Function, input.Args)
+
+ // delete isTransaction entry
+ handler.deleteIsTransaction(msg.Uuid)
+
+ if err != nil {
+ payload := []byte(err.Error())
+ // Send ERROR message to chaincode support and change state
+ chaincodeLogger.Errorf("[%s]Init failed. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_ERROR)
+ nextStateMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid, ChaincodeEvent: stub.chaincodeEvent}
+ return
+ }
+
+ // Send COMPLETED message to chaincode support and change state
+ nextStateMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_COMPLETED, Payload: res, Uuid: msg.Uuid, ChaincodeEvent: stub.chaincodeEvent}
+ chaincodeLogger.Debugf("[%s]Init succeeded. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_COMPLETED)
+ }()
+}
+
+// enterInitState will initialize the chaincode if entering init from established.
+func (handler *Handler) enterInitState(e *fsm.Event) {
+ chaincodeLogger.Debugf("Entered state %s", handler.FSM.Current())
+ msg, ok := e.Args[0].(*pb.ChaincodeMessage)
+ if !ok {
+ e.Cancel(fmt.Errorf("Received unexpected message type"))
+ return
+ }
+ chaincodeLogger.Debugf("[%s]Received %s, initializing chaincode", shortuuid(msg.Uuid), msg.Type.String())
+ if msg.Type.String() == pb.ChaincodeMessage_INIT.String() {
+ // Call the chaincode's Run function to initialize
+ handler.handleInit(msg)
+ }
+}
+
+// handleTransaction Handles request to execute a transaction.
+func (handler *Handler) handleTransaction(msg *pb.ChaincodeMessage) {
+ // The defer followed by triggering a go routine dance is needed to ensure that the previous state transition
+ // is completed before the next one is triggered. The previous state transition is deemed complete only when
+ // the beforeInit function is exited. Interesting bug fix!!
+ go func() {
+ //better not be nil
+ var nextStateMsg *pb.ChaincodeMessage
+
+ send := true
+
+ defer func() {
+ handler.triggerNextState(nextStateMsg, send)
+ }()
+
+ // Get the function and args from Payload
+ input := &pb.ChaincodeInput{}
+ unmarshalErr := proto.Unmarshal(msg.Payload, input)
+ if unmarshalErr != nil {
+ payload := []byte(unmarshalErr.Error())
+ // Send ERROR message to chaincode support and change state
+ chaincodeLogger.Debugf("[%s]Incorrect payload format. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_ERROR)
+ nextStateMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+ return
+ }
+
+ // Mark as a transaction (allow put/del state)
+ handler.markIsTransaction(msg.Uuid, true)
+
+ // Call chaincode's Run
+ // Create the ChaincodeStub which the chaincode can use to callback
+ stub := new(ChaincodeStub)
+ stub.init(msg.Uuid, msg.SecurityContext)
+ res, err := handler.cc.Invoke(stub, input.Function, input.Args)
+
+ // delete isTransaction entry
+ handler.deleteIsTransaction(msg.Uuid)
+
+ if err != nil {
+ payload := []byte(err.Error())
+ // Send ERROR message to chaincode support and change state
+ chaincodeLogger.Errorf("[%s]Transaction execution failed. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_ERROR)
+ nextStateMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid, ChaincodeEvent: stub.chaincodeEvent}
+ return
+ }
+
+ // Send COMPLETED message to chaincode support and change state
+ chaincodeLogger.Debugf("[%s]Transaction completed. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_COMPLETED)
+ nextStateMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_COMPLETED, Payload: res, Uuid: msg.Uuid, ChaincodeEvent: stub.chaincodeEvent}
+ }()
+}
+
+// handleQuery handles request to execute a query.
+func (handler *Handler) handleQuery(msg *pb.ChaincodeMessage) {
+ // Query does not transition state. It can happen anytime after Ready
+ go func() {
+ var serialSendMsg *pb.ChaincodeMessage
+
+ defer func() {
+ handler.serialSend(serialSendMsg)
+ }()
+
+ // Get the function and args from Payload
+ input := &pb.ChaincodeInput{}
+ unmarshalErr := proto.Unmarshal(msg.Payload, input)
+ if unmarshalErr != nil {
+ payload := []byte(unmarshalErr.Error())
+ // Send ERROR message to chaincode support and change state
+ chaincodeLogger.Debugf("[%s]Incorrect payload format. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_QUERY_ERROR)
+ serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_QUERY_ERROR, Payload: payload, Uuid: msg.Uuid}
+ return
+ }
+
+ // Mark as a query (do not allow put/del state)
+ handler.markIsTransaction(msg.Uuid, false)
+
+ // Call chaincode's Query
+ // Create the ChaincodeStub which the chaincode can use to callback
+ stub := new(ChaincodeStub)
+ stub.init(msg.Uuid, msg.SecurityContext)
+ res, err := handler.cc.Query(stub, input.Function, input.Args)
+
+ // delete isTransaction entry
+ handler.deleteIsTransaction(msg.Uuid)
+
+ if err != nil {
+ payload := []byte(err.Error())
+ // Send ERROR message to chaincode support and change state
+ chaincodeLogger.Errorf("[%s]Query execution failed. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_QUERY_ERROR)
+ serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_QUERY_ERROR, Payload: payload, Uuid: msg.Uuid}
+ return
+ }
+
+ // Send COMPLETED message to chaincode support
+ chaincodeLogger.Debugf("[%s]Query completed. Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_QUERY_COMPLETED)
+ serialSendMsg = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_QUERY_COMPLETED, Payload: res, Uuid: msg.Uuid}
+ }()
+}
+
+// enterTransactionState will execute chaincode's Run if coming from a TRANSACTION event.
+func (handler *Handler) enterTransactionState(e *fsm.Event) {
+ msg, ok := e.Args[0].(*pb.ChaincodeMessage)
+ if !ok {
+ e.Cancel(fmt.Errorf("Received unexpected message type"))
+ return
+ }
+ chaincodeLogger.Debugf("[%s]Received %s, invoking transaction on chaincode(Src:%s, Dst:%s)", shortuuid(msg.Uuid), msg.Type.String(), e.Src, e.Dst)
+ if msg.Type.String() == pb.ChaincodeMessage_TRANSACTION.String() {
+ // Call the chaincode's Run function to invoke transaction
+ handler.handleTransaction(msg)
+ }
+}
+
+// enterReadyState will need to handle COMPLETED event by sending message to the peer
+//func (handler *Handler) enterReadyState(e *fsm.Event) {
+
+// afterCompleted will need to handle COMPLETED event by sending message to the peer
+func (handler *Handler) afterCompleted(e *fsm.Event) {
+ msg, ok := e.Args[0].(*pb.ChaincodeMessage)
+ if !ok {
+ e.Cancel(fmt.Errorf("Received unexpected message type"))
+ return
+ }
+ chaincodeLogger.Debugf("[%s]sending COMPLETED to validator for tid", shortuuid(msg.Uuid))
+ if err := handler.serialSend(msg); err != nil {
+ e.Cancel(fmt.Errorf("send COMPLETED failed %s", err))
+ }
+}
+
+// beforeQuery is invoked when a query message is received from the validator
+func (handler *Handler) beforeQuery(e *fsm.Event) {
+ if e.Args != nil {
+ msg, ok := e.Args[0].(*pb.ChaincodeMessage)
+ if !ok {
+ e.Cancel(fmt.Errorf("Received unexpected message type"))
+ return
+ }
+ handler.handleQuery(msg)
+ }
+}
+
+// afterResponse is called to deliver a response or error to the chaincode stub.
+func (handler *Handler) afterResponse(e *fsm.Event) {
+ msg, ok := e.Args[0].(*pb.ChaincodeMessage)
+ if !ok {
+ e.Cancel(fmt.Errorf("Received unexpected message type"))
+ return
+ }
+
+ if err := handler.sendChannel(msg); err != nil {
+ chaincodeLogger.Errorf("[%s]error sending %s (state:%s): %s", shortuuid(msg.Uuid), msg.Type, handler.FSM.Current(), err)
+ } else {
+ chaincodeLogger.Debugf("[%s]Received %s, communicated (state:%s)", shortuuid(msg.Uuid), msg.Type, handler.FSM.Current())
+ }
+}
+
+func (handler *Handler) afterError(e *fsm.Event) {
+ msg, ok := e.Args[0].(*pb.ChaincodeMessage)
+ if !ok {
+ e.Cancel(fmt.Errorf("Received unexpected message type"))
+ return
+ }
+
+ /* TODO- revisit. This may no longer be needed with the serialized/streamlined messaging model
+ * There are two situations in which the ERROR event can be triggered:
+ * 1. When an error is encountered within handleInit or handleTransaction - some issue at the chaincode side; In this case there will be no responseChannel and the message has been sent to the validator.
+ * 2. The chaincode has initiated a request (get/put/del state) to the validator and is expecting a response on the responseChannel; If ERROR is received from validator, this needs to be notified on the responseChannel.
+ */
+ if err := handler.sendChannel(msg); err == nil {
+ chaincodeLogger.Debugf("[%s]Error received from validator %s, communicated(state:%s)", shortuuid(msg.Uuid), msg.Type, handler.FSM.Current())
+ }
+}
+
+// TODO: Implement method to get and put entire state map and not one key at a time?
+// handleGetState communicates with the validator to fetch the requested state information from the ledger.
+func (handler *Handler) handleGetState(key string, uuid string) ([]byte, error) {
+ // Create the channel on which to communicate the response from validating peer
+ respChan, uniqueReqErr := handler.createChannel(uuid)
+ if uniqueReqErr != nil {
+ chaincodeLogger.Debug("Another state request pending for this Uuid. Cannot process.")
+ return nil, uniqueReqErr
+ }
+
+ defer handler.deleteChannel(uuid)
+
+ // Send GET_STATE message to validator chaincode support
+ payload := []byte(key)
+ msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_GET_STATE, Payload: payload, Uuid: uuid}
+ chaincodeLogger.Debugf("[%s]Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_GET_STATE)
+ if err := handler.serialSend(msg); err != nil {
+ chaincodeLogger.Errorf("[%s]error sending GET_STATE %s", shortuuid(uuid), err)
+ return nil, errors.New("could not send msg")
+ }
+
+ // Wait on responseChannel for response
+ responseMsg, ok := handler.receiveChannel(respChan)
+ if !ok {
+ chaincodeLogger.Errorf("[%s]Received unexpected message type", shortuuid(responseMsg.Uuid))
+ return nil, errors.New("Received unexpected message type")
+ }
+
+ if responseMsg.Type.String() == pb.ChaincodeMessage_RESPONSE.String() {
+ // Success response
+ chaincodeLogger.Debugf("[%s]GetState received payload %s", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_RESPONSE)
+ return responseMsg.Payload, nil
+ }
+ if responseMsg.Type.String() == pb.ChaincodeMessage_ERROR.String() {
+ // Error response
+ chaincodeLogger.Errorf("[%s]GetState received error %s", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_ERROR)
+ return nil, errors.New(string(responseMsg.Payload[:]))
+ }
+
+ // Incorrect chaincode message received
+ chaincodeLogger.Errorf("[%s]Incorrect chaincode message %s received. Expecting %s or %s", shortuuid(responseMsg.Uuid), responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
+ return nil, errors.New("Incorrect chaincode message received")
+}
+
+// handlePutState communicates with the validator to put state information into the ledger.
+func (handler *Handler) handlePutState(key string, value []byte, uuid string) error {
+ // Check if this is a transaction
+ chaincodeLogger.Debugf("[%s]Inside putstate, isTransaction = %t", shortuuid(uuid), handler.isTransaction[uuid])
+ if !handler.isTransaction[uuid] {
+ return errors.New("Cannot put state in query context")
+ }
+
+ payload := &pb.PutStateInfo{Key: key, Value: value}
+ payloadBytes, err := proto.Marshal(payload)
+ if err != nil {
+ return errors.New("Failed to process put state request")
+ }
+
+ // Create the channel on which to communicate the response from validating peer
+ respChan, uniqueReqErr := handler.createChannel(uuid)
+ if uniqueReqErr != nil {
+ chaincodeLogger.Errorf("[%s]Another state request pending for this Uuid. Cannot process.", shortuuid(uuid))
+ return uniqueReqErr
+ }
+
+ defer handler.deleteChannel(uuid)
+
+ // Send PUT_STATE message to validator chaincode support
+ msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_PUT_STATE, Payload: payloadBytes, Uuid: uuid}
+ chaincodeLogger.Debugf("[%s]Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_PUT_STATE)
+ if err = handler.serialSend(msg); err != nil {
+ chaincodeLogger.Errorf("[%s]error sending PUT_STATE %s", msg.Uuid, err)
+ return errors.New("could not send msg")
+ }
+
+ // Wait on responseChannel for response
+ responseMsg, ok := handler.receiveChannel(respChan)
+ if !ok {
+ chaincodeLogger.Errorf("[%s]Received unexpected message type", msg.Uuid)
+ return errors.New("Received unexpected message type")
+ }
+
+ if responseMsg.Type.String() == pb.ChaincodeMessage_RESPONSE.String() {
+ // Success response
+ chaincodeLogger.Debugf("[%s]Received %s. Successfully updated state", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_RESPONSE)
+ return nil
+ }
+
+ if responseMsg.Type.String() == pb.ChaincodeMessage_ERROR.String() {
+ // Error response
+ chaincodeLogger.Errorf("[%s]Received %s. Payload: %s", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_ERROR, responseMsg.Payload)
+ return errors.New(string(responseMsg.Payload[:]))
+ }
+
+ // Incorrect chaincode message received
+ chaincodeLogger.Errorf("[%s]Incorrect chaincode message %s received. Expecting %s or %s", shortuuid(responseMsg.Uuid), responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
+ return errors.New("Incorrect chaincode message received")
+}
+
+// handleDelState communicates with the validator to delete a key from the state in the ledger.
+func (handler *Handler) handleDelState(key string, uuid string) error {
+ // Check if this is a transaction
+ if !handler.isTransaction[uuid] {
+ return errors.New("Cannot del state in query context")
+ }
+
+ // Create the channel on which to communicate the response from validating peer
+ respChan, uniqueReqErr := handler.createChannel(uuid)
+ if uniqueReqErr != nil {
+ chaincodeLogger.Errorf("[%s]Another state request pending for this Uuid. Cannot process create createChannel.", shortuuid(uuid))
+ return uniqueReqErr
+ }
+
+ defer handler.deleteChannel(uuid)
+
+ // Send DEL_STATE message to validator chaincode support
+ payload := []byte(key)
+ msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_DEL_STATE, Payload: payload, Uuid: uuid}
+ chaincodeLogger.Debugf("[%s]Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_DEL_STATE)
+ if err := handler.serialSend(msg); err != nil {
+ chaincodeLogger.Errorf("[%s]error sending DEL_STATE %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_DEL_STATE)
+ return errors.New("could not send msg")
+ }
+
+ // Wait on responseChannel for response
+ responseMsg, ok := handler.receiveChannel(respChan)
+ if !ok {
+ chaincodeLogger.Errorf("[%s]Received unexpected message type", shortuuid(msg.Uuid))
+ return errors.New("Received unexpected message type")
+ }
+
+ if responseMsg.Type.String() == pb.ChaincodeMessage_RESPONSE.String() {
+ // Success response
+ chaincodeLogger.Debugf("[%s]Received %s. Successfully deleted state", msg.Uuid, pb.ChaincodeMessage_RESPONSE)
+ return nil
+ }
+ if responseMsg.Type.String() == pb.ChaincodeMessage_ERROR.String() {
+ // Error response
+ chaincodeLogger.Errorf("[%s]Received %s. Payload: %s", msg.Uuid, pb.ChaincodeMessage_ERROR, responseMsg.Payload)
+ return errors.New(string(responseMsg.Payload[:]))
+ }
+
+ // Incorrect chaincode message received
+ chaincodeLogger.Errorf("[%s]Incorrect chaincode message %s received. Expecting %s or %s", shortuuid(responseMsg.Uuid), responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
+ return errors.New("Incorrect chaincode message received")
+}
+
+func (handler *Handler) handleRangeQueryState(startKey, endKey string, uuid string) (*pb.RangeQueryStateResponse, error) {
+ // Create the channel on which to communicate the response from validating peer
+ respChan, uniqueReqErr := handler.createChannel(uuid)
+ if uniqueReqErr != nil {
+ chaincodeLogger.Debugf("[%s]Another state request pending for this Uuid. Cannot process.", shortuuid(uuid))
+ return nil, uniqueReqErr
+ }
+
+ defer handler.deleteChannel(uuid)
+
+ // Send RANGE_QUERY_STATE message to validator chaincode support
+ payload := &pb.RangeQueryState{StartKey: startKey, EndKey: endKey}
+ payloadBytes, err := proto.Marshal(payload)
+ if err != nil {
+ return nil, errors.New("Failed to process range query state request")
+ }
+ msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RANGE_QUERY_STATE, Payload: payloadBytes, Uuid: uuid}
+ chaincodeLogger.Debugf("[%s]Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_RANGE_QUERY_STATE)
+ if err = handler.serialSend(msg); err != nil {
+ chaincodeLogger.Errorf("[%s]error sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_RANGE_QUERY_STATE)
+ return nil, errors.New("could not send msg")
+ }
+
+ // Wait on responseChannel for response
+ responseMsg, ok := handler.receiveChannel(respChan)
+ if !ok {
+ chaincodeLogger.Errorf("[%s]Received unexpected message type", uuid)
+ return nil, errors.New("Received unexpected message type")
+ }
+
+ if responseMsg.Type.String() == pb.ChaincodeMessage_RESPONSE.String() {
+ // Success response
+ chaincodeLogger.Debugf("[%s]Received %s. Successfully got range", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_RESPONSE)
+
+ rangeQueryResponse := &pb.RangeQueryStateResponse{}
+ unmarshalErr := proto.Unmarshal(responseMsg.Payload, rangeQueryResponse)
+ if unmarshalErr != nil {
+ chaincodeLogger.Errorf("[%s]unmarshall error", shortuuid(responseMsg.Uuid))
+ return nil, errors.New("Error unmarshalling RangeQueryStateResponse.")
+ }
+
+ return rangeQueryResponse, nil
+ }
+ if responseMsg.Type.String() == pb.ChaincodeMessage_ERROR.String() {
+ // Error response
+ chaincodeLogger.Errorf("[%s]Received %s", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_ERROR)
+ return nil, errors.New(string(responseMsg.Payload[:]))
+ }
+
+ // Incorrect chaincode message received
+	chaincodeLogger.Errorf("Incorrect chaincode message %s received. Expecting %s or %s", responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
+ return nil, errors.New("Incorrect chaincode message received")
+}
+
+func (handler *Handler) handleRangeQueryStateNext(id, uuid string) (*pb.RangeQueryStateResponse, error) {
+ // Create the channel on which to communicate the response from validating peer
+ respChan, uniqueReqErr := handler.createChannel(uuid)
+ if uniqueReqErr != nil {
+ chaincodeLogger.Debugf("[%s]Another state request pending for this Uuid. Cannot process.", shortuuid(uuid))
+ return nil, uniqueReqErr
+ }
+
+ defer handler.deleteChannel(uuid)
+
+ // Send RANGE_QUERY_STATE_NEXT message to validator chaincode support
+ payload := &pb.RangeQueryStateNext{ID: id}
+ payloadBytes, err := proto.Marshal(payload)
+ if err != nil {
+ return nil, errors.New("Failed to process range query state next request")
+ }
+ msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RANGE_QUERY_STATE_NEXT, Payload: payloadBytes, Uuid: uuid}
+ chaincodeLogger.Debugf("[%s]Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_RANGE_QUERY_STATE_NEXT)
+ if err = handler.serialSend(msg); err != nil {
+ chaincodeLogger.Errorf("[%s]error sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_RANGE_QUERY_STATE_NEXT)
+ return nil, errors.New("could not send msg")
+ }
+
+ // Wait on responseChannel for response
+ responseMsg, ok := handler.receiveChannel(respChan)
+ if !ok {
+ chaincodeLogger.Errorf("[%s]Received unexpected message type", uuid)
+ return nil, errors.New("Received unexpected message type")
+ }
+
+ if responseMsg.Type.String() == pb.ChaincodeMessage_RESPONSE.String() {
+ // Success response
+ chaincodeLogger.Debugf("[%s]Received %s. Successfully got range", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_RESPONSE)
+
+ rangeQueryResponse := &pb.RangeQueryStateResponse{}
+ unmarshalErr := proto.Unmarshal(responseMsg.Payload, rangeQueryResponse)
+ if unmarshalErr != nil {
+ chaincodeLogger.Errorf("[%s]unmarshall error", shortuuid(responseMsg.Uuid))
+ return nil, errors.New("Error unmarshalling RangeQueryStateResponse.")
+ }
+
+ return rangeQueryResponse, nil
+ }
+ if responseMsg.Type.String() == pb.ChaincodeMessage_ERROR.String() {
+ // Error response
+ chaincodeLogger.Errorf("[%s]Received %s", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_ERROR)
+ return nil, errors.New(string(responseMsg.Payload[:]))
+ }
+
+ // Incorrect chaincode message received
+	chaincodeLogger.Errorf("Incorrect chaincode message %s received. Expecting %s or %s", responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
+ return nil, errors.New("Incorrect chaincode message received")
+}
+
+func (handler *Handler) handleRangeQueryStateClose(id, uuid string) (*pb.RangeQueryStateResponse, error) {
+ // Create the channel on which to communicate the response from validating peer
+ respChan, uniqueReqErr := handler.createChannel(uuid)
+ if uniqueReqErr != nil {
+ chaincodeLogger.Debugf("[%s]Another state request pending for this Uuid. Cannot process.", shortuuid(uuid))
+ return nil, uniqueReqErr
+ }
+
+ defer handler.deleteChannel(uuid)
+
+ // Send RANGE_QUERY_STATE_CLOSE message to validator chaincode support
+ payload := &pb.RangeQueryStateClose{ID: id}
+ payloadBytes, err := proto.Marshal(payload)
+ if err != nil {
+ return nil, errors.New("Failed to process range query state close request")
+ }
+ msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RANGE_QUERY_STATE_CLOSE, Payload: payloadBytes, Uuid: uuid}
+ chaincodeLogger.Debugf("[%s]Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_RANGE_QUERY_STATE_CLOSE)
+ if err = handler.serialSend(msg); err != nil {
+ chaincodeLogger.Errorf("[%s]error sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_RANGE_QUERY_STATE_CLOSE)
+ return nil, errors.New("could not send msg")
+ }
+
+ // Wait on responseChannel for response
+ responseMsg, ok := handler.receiveChannel(respChan)
+ if !ok {
+ chaincodeLogger.Errorf("[%s]Received unexpected message type", uuid)
+ return nil, errors.New("Received unexpected message type")
+ }
+
+ if responseMsg.Type.String() == pb.ChaincodeMessage_RESPONSE.String() {
+ // Success response
+ chaincodeLogger.Debugf("[%s]Received %s. Successfully got range", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_RESPONSE)
+
+ rangeQueryResponse := &pb.RangeQueryStateResponse{}
+ unmarshalErr := proto.Unmarshal(responseMsg.Payload, rangeQueryResponse)
+ if unmarshalErr != nil {
+ chaincodeLogger.Errorf("[%s]unmarshall error", shortuuid(responseMsg.Uuid))
+ return nil, errors.New("Error unmarshalling RangeQueryStateResponse.")
+ }
+
+ return rangeQueryResponse, nil
+ }
+ if responseMsg.Type.String() == pb.ChaincodeMessage_ERROR.String() {
+ // Error response
+ chaincodeLogger.Errorf("[%s]Received %s", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_ERROR)
+ return nil, errors.New(string(responseMsg.Payload[:]))
+ }
+
+ // Incorrect chaincode message received
+ chaincodeLogger.Errorf("Incorrect chaincode message %s received. Expecting %s or %s", responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
+ return nil, errors.New("Incorrect chaincode message received")
+}
+
+// handleInvokeChaincode communicates with the validator to invoke another chaincode.
+func (handler *Handler) handleInvokeChaincode(chaincodeName string, function string, args []string, uuid string) ([]byte, error) {
+ // Check if this is a transaction
+ if !handler.isTransaction[uuid] {
+ return nil, errors.New("Cannot invoke chaincode in query context")
+ }
+
+ chaincodeID := &pb.ChaincodeID{Name: chaincodeName}
+ input := &pb.ChaincodeInput{Function: function, Args: args}
+ payload := &pb.ChaincodeSpec{ChaincodeID: chaincodeID, CtorMsg: input}
+ payloadBytes, err := proto.Marshal(payload)
+ if err != nil {
+ return nil, errors.New("Failed to process invoke chaincode request")
+ }
+
+ // Create the channel on which to communicate the response from validating peer
+ respChan, uniqueReqErr := handler.createChannel(uuid)
+ if uniqueReqErr != nil {
+ chaincodeLogger.Errorf("[%s]Another request pending for this Uuid. Cannot process.", uuid)
+ return nil, uniqueReqErr
+ }
+
+ defer handler.deleteChannel(uuid)
+
+ // Send INVOKE_CHAINCODE message to validator chaincode support
+ msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_INVOKE_CHAINCODE, Payload: payloadBytes, Uuid: uuid}
+ chaincodeLogger.Debugf("[%s]Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_INVOKE_CHAINCODE)
+ if err = handler.serialSend(msg); err != nil {
+ chaincodeLogger.Errorf("[%s]error sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_INVOKE_CHAINCODE)
+ return nil, errors.New("could not send msg")
+ }
+
+ // Wait on responseChannel for response
+ responseMsg, ok := handler.receiveChannel(respChan)
+ if !ok {
+ chaincodeLogger.Errorf("[%s]Received unexpected message type", shortuuid(msg.Uuid))
+ return nil, errors.New("Received unexpected message type")
+ }
+
+ if responseMsg.Type.String() == pb.ChaincodeMessage_RESPONSE.String() {
+ // Success response
+ chaincodeLogger.Debugf("[%s]Received %s. Successfully invoked chaincode", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_RESPONSE)
+ respMsg := &pb.ChaincodeMessage{}
+ if err := proto.Unmarshal(responseMsg.Payload, respMsg); err != nil {
+ chaincodeLogger.Errorf("[%s]Error unmarshaling called chaincode response: %s", shortuuid(responseMsg.Uuid), err)
+ return nil, err
+ }
+ if respMsg.Type == pb.ChaincodeMessage_COMPLETED {
+ // Success response
+ chaincodeLogger.Debugf("[%s]Received %s. Successfully invoked chaincode", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_RESPONSE)
+ return respMsg.Payload, nil
+ }
+ chaincodeLogger.Errorf("[%s]Received %s. Error from chaincode", shortuuid(responseMsg.Uuid), respMsg.Type.String())
+ return nil, errors.New(string(respMsg.Payload[:]))
+ }
+ if responseMsg.Type.String() == pb.ChaincodeMessage_ERROR.String() {
+ // Error response
+ chaincodeLogger.Errorf("[%s]Received %s.", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_ERROR)
+ return nil, errors.New(string(responseMsg.Payload[:]))
+ }
+
+ // Incorrect chaincode message received
+ chaincodeLogger.Debugf("[%s]Incorrect chaincode message %s received. Expecting %s or %s", shortuuid(responseMsg.Uuid), responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
+ return nil, errors.New("Incorrect chaincode message received")
+}
+
+// handleQueryChaincode communicates with the validator to query another chaincode.
+func (handler *Handler) handleQueryChaincode(chaincodeName string, function string, args []string, uuid string) ([]byte, error) {
+ chaincodeID := &pb.ChaincodeID{Name: chaincodeName}
+ input := &pb.ChaincodeInput{Function: function, Args: args}
+ payload := &pb.ChaincodeSpec{ChaincodeID: chaincodeID, CtorMsg: input}
+ payloadBytes, err := proto.Marshal(payload)
+ if err != nil {
+ return nil, errors.New("Failed to process query chaincode request")
+ }
+
+ // Create the channel on which to communicate the response from validating peer
+ respChan, uniqueReqErr := handler.createChannel(uuid)
+ if uniqueReqErr != nil {
+ chaincodeLogger.Debug("Another request pending for this Uuid. Cannot process.")
+ return nil, uniqueReqErr
+ }
+
+ defer handler.deleteChannel(uuid)
+
+ // Send INVOKE_QUERY message to validator chaincode support
+ msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_INVOKE_QUERY, Payload: payloadBytes, Uuid: uuid}
+ chaincodeLogger.Debugf("[%s]Sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_INVOKE_QUERY)
+ if err = handler.serialSend(msg); err != nil {
+ chaincodeLogger.Errorf("[%s]error sending %s", shortuuid(msg.Uuid), pb.ChaincodeMessage_INVOKE_QUERY)
+ return nil, errors.New("could not send msg")
+ }
+
+ // Wait on responseChannel for response
+ responseMsg, ok := handler.receiveChannel(respChan)
+ if !ok {
+ chaincodeLogger.Errorf("[%s]Received unexpected message type", shortuuid(msg.Uuid))
+ return nil, errors.New("Received unexpected message type")
+ }
+
+ if responseMsg.Type.String() == pb.ChaincodeMessage_RESPONSE.String() {
+ respMsg := &pb.ChaincodeMessage{}
+ if err := proto.Unmarshal(responseMsg.Payload, respMsg); err != nil {
+ chaincodeLogger.Errorf("[%s]Error unmarshaling called chaincode response: %s", shortuuid(responseMsg.Uuid), err)
+ return nil, err
+ }
+ if respMsg.Type == pb.ChaincodeMessage_QUERY_COMPLETED {
+ // Success response
+ chaincodeLogger.Debugf("[%s]Received %s. Successfully queried chaincode", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_RESPONSE)
+ return respMsg.Payload, nil
+ }
+ chaincodeLogger.Errorf("[%s]Error from chaincode: %s", shortuuid(responseMsg.Uuid), string(respMsg.Payload[:]))
+ return nil, errors.New(string(respMsg.Payload[:]))
+ }
+ if responseMsg.Type.String() == pb.ChaincodeMessage_ERROR.String() {
+ // Error response
+ chaincodeLogger.Errorf("[%s]Received %s.", shortuuid(responseMsg.Uuid), pb.ChaincodeMessage_ERROR)
+ return nil, errors.New(string(responseMsg.Payload[:]))
+ }
+
+ // Incorrect chaincode message received
+ chaincodeLogger.Errorf("[%s]Incorrect chaincode message %s received. Expecting %s or %s", shortuuid(responseMsg.Uuid), responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
+ return nil, errors.New("Incorrect chaincode message received")
+}
+
+// handleMessage is the message-handling entry point for the shim side of the chaincode/validator stream.
+func (handler *Handler) handleMessage(msg *pb.ChaincodeMessage) error {
+ if msg.Type == pb.ChaincodeMessage_KEEPALIVE {
+ // Received a keep alive message, we don't do anything with it for now
+ // and it does not touch the state machine
+ return nil
+ }
+ chaincodeLogger.Debugf("[%s]Handling ChaincodeMessage of type: %s(state:%s)", shortuuid(msg.Uuid), msg.Type, handler.FSM.Current())
+ if handler.FSM.Cannot(msg.Type.String()) {
+ errStr := fmt.Sprintf("[%s]Chaincode handler FSM cannot handle message (%s) with payload size (%d) while in state: %s", msg.Uuid, msg.Type.String(), len(msg.Payload), handler.FSM.Current())
+ err := errors.New(errStr)
+ payload := []byte(err.Error())
+ errorMsg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Uuid: msg.Uuid}
+ handler.serialSend(errorMsg)
+ return err
+ }
+ err := handler.FSM.Event(msg.Type.String(), msg)
+ return filterError(err)
+}
+
+// filterError filters the errors to allow NoTransitionError and CanceledError to not propagate for cases where embedded Err == nil.
+func filterError(errFromFSMEvent error) error {
+ if errFromFSMEvent != nil {
+ if noTransitionErr, ok := errFromFSMEvent.(*fsm.NoTransitionError); ok {
+ if noTransitionErr.Err != nil {
+ // Only allow NoTransitionError's, all others are considered true error.
+ return errFromFSMEvent
+ }
+ }
+ if canceledErr, ok := errFromFSMEvent.(*fsm.CanceledError); ok {
+ if canceledErr.Err != nil {
+ // A CanceledError wrapping a non-nil Err is a genuine failure
+ // and must propagate to the caller.
+ return canceledErr
+ }
+ chaincodeLogger.Debugf("Ignoring CanceledError: %s", canceledErr)
+ }
+ }
+ return nil
+}
diff --git a/core/chaincode/shim/inprocstream.go b/core/chaincode/shim/inprocstream.go
new file mode 100644
index 00000000000..574b6b58e6b
--- /dev/null
+++ b/core/chaincode/shim/inprocstream.go
@@ -0,0 +1,45 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package shim
+
+import (
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// inProcStream implements the PeerChaincodeStream interface for an in-process stream between Peer and chaincode instance.
+type inProcStream struct {
+ recv <-chan *pb.ChaincodeMessage
+ send chan<- *pb.ChaincodeMessage
+}
+
+func newInProcStream(recv <-chan *pb.ChaincodeMessage, send chan<- *pb.ChaincodeMessage) *inProcStream {
+ return &inProcStream{recv, send}
+}
+
+func (s *inProcStream) Send(msg *pb.ChaincodeMessage) error {
+ s.send <- msg
+ return nil
+}
+
+func (s *inProcStream) Recv() (*pb.ChaincodeMessage, error) {
+ msg := <-s.recv
+ return msg, nil
+}
+
+func (s *inProcStream) CloseSend() error {
+ return nil
+}
diff --git a/core/chaincode/shim/java/build.gradle b/core/chaincode/shim/java/build.gradle
new file mode 100644
index 00000000000..4314021aaea
--- /dev/null
+++ b/core/chaincode/shim/java/build.gradle
@@ -0,0 +1,89 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+buildscript {
+ repositories {
+ mavenLocal()
+ mavenCentral()
+ jcenter()
+ }
+ dependencies {
+ classpath 'com.google.protobuf:protobuf-gradle-plugin:0.7.6'
+ }
+}
+
+plugins {
+ id "java"
+ id "com.google.protobuf" version "0.7.6"
+ id "eclipse"
+ id "application"
+}
+
+mainClassName = "example.SimpleSample"
+
+run {
+ if (project.hasProperty("appArgs")) {
+ args = Eval.me(appArgs)
+ }
+}
+
+sourceSets {
+ main {
+ java {
+ srcDir 'src/main/java'
+ }
+ proto {
+ srcDir 'src/main/proto'
+ }
+ }
+}
+
+repositories {
+ mavenLocal()
+ mavenCentral()
+}
+
+protobuf {
+ generatedFilesBaseDir = "$projectDir/src"
+ protoc {
+ artifact = 'com.google.protobuf:protoc:3.0.0-beta-2'
+ }
+ plugins {
+ grpc {
+ artifact = 'io.grpc:protoc-gen-grpc-java:0.13.2'
+ }
+ }
+ generateProtoTasks {
+ all().each { task ->
+ task.builtins {
+ java {
+ outputSubDir = 'java'
+ }
+ }
+ task.plugins {
+ grpc {
+ outputSubDir = 'java'
+ }
+ }
+ }
+ }
+}
+
+dependencies {
+ compile 'com.google.protobuf:protobuf-java:3.0.0-beta-2'
+ compile 'io.grpc:grpc-all:0.13.2'
+ compile 'commons-cli:commons-cli:1.3.1'
+}
diff --git a/core/chaincode/shim/java/src/main/java/commons-logging.properties b/core/chaincode/shim/java/src/main/java/commons-logging.properties
new file mode 100644
index 00000000000..f38297442c7
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/commons-logging.properties
@@ -0,0 +1,28 @@
+# commons-logging.properties
+# jdk handlers
+handlers=java.util.logging.FileHandler, java.util.logging.ConsoleHandler
+
+# default log level
+.level=INFO
+
+# Specific logger level
+example.Example.level=FINE
+example.SimpleSample.level=FINE
+
+# FileHandler options - can also be set to the ConsoleHandler
+# FileHandler level can be set to override the global level:
+java.util.logging.FileHandler.level=ALL
+
+# log file name for the File Handler
+java.util.logging.FileHandler.pattern=java-chaincode%u.log
+
+# Specify the style of output (simple or xml)
+java.util.logging.FileHandler.formatter=java.util.logging.SimpleFormatter
+
+# Optional - Limit the size of the file (in bytes)
+java.util.logging.FileHandler.limit=50000
+
+# Optional - The number of files to cycle through, by
+# appending an integer to the base file name:
+java.util.logging.FileHandler.count=10
+
diff --git a/core/chaincode/shim/java/src/main/java/example/Example.java b/core/chaincode/shim/java/src/main/java/example/Example.java
new file mode 100644
index 00000000000..3d9e3b9659c
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/example/Example.java
@@ -0,0 +1,116 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+package example;
+
+import org.hyperledger.java.shim.ChaincodeBase;
+import org.hyperledger.java.shim.ChaincodeStub;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+
+/**
+ * "Hello world" Chaincode
+ * Features:
+ *
+ * - in addition to framework-propagated (put\del\query) functions
+ * also provides no-op function "hello" so it could be invoked without executing stub code @see shim.ChaincodeStub
+ * - query will greet you regardless if argument found in the map, but if found it will be personal greeting
+ * - put and del work as you would expect for Map implementation
+ *
+ * Meant as the default java Chaincode implementation, e.g. invoked by default, and requires no init/prep work to check functionality
+ * steps to invoke chaincode functions in dev mode:
+ * 1. run node in dev mode e.g.:
+'export CORE_LOGGING_LEVEL=debug
+./peer node start --peer-chaincodedev'
+ *2. run chaincode e.g. via gradle:
+ 'gradle run'
+ *3. Now we can communicate to chaincode via peer:
+
+
+./peer chaincode deploy -n hello -c '{"Function":"init","Args":[]}'
+./peer chaincode query -n hello -c '{"Function":"put","Args":["Me"]}'
+- gets your argument echoed back if not found in the map
+
+./peer chaincode invoke -n hello -c '{"Function":"hello","Args":[""]}'
+- no-op test. invoke chaincode, but not ChaincodeStub @see shim.ChaincodeStub, Handler @see shim.Handler
+hence no channel call and only effect is line in stdout
+
+./peer chaincode invoke -n hello -c '{"Function":"put","Args":["hey","me"]}'
+- put your name on the map
+
+./peer chaincode query -n hello -c '{"Function":"put","Args":["hey"]}'
+- gets your argument echoed back if not found in the map
+
+./peer chaincode query -n hello -c '{"Function":"put","Args":["hey"]}'
+- personal greeting for mapped name
+
+ *
+ * @author Sergey Pomytkin spomytkin@gmail.com
+ *
+ */
+public class Example extends ChaincodeBase {
+ private static Log log = LogFactory.getLog(Example.class);
+
+ @Override
+ public String run(ChaincodeStub stub, String function, String[] args) {
+ log.info("In run, function:"+function);
+ switch (function) {
+ case "put":
+ for (int i = 0; i < args.length; i += 2)
+ stub.putState(args[i], args[i + 1]);
+ break;
+ case "del":
+ for (String arg : args)
+ stub.delState(arg);
+ break;
+ case "hello":
+ System.out.println("hello invoked");
+ log.info("hello invoked");
+ break;
+ }
+ log.error("No matching case for function:"+function);
+ return null;
+ }
+
+ @Override
+ public String query(ChaincodeStub stub, String function, String[] args) {
+ log.info("query");
+ System.out.println("Hello world! function:"+function);
+ log.debug("query:"+args[0]+"="+stub.getState(args[0]));
+ if (stub.getState(args[0])!=null&&!stub.getState(args[0]).isEmpty()){
+ log.trace("returning: Hello world! from "+ stub.getState(args[0]));
+ return "Hello world! from "+ stub.getState(args[0]);
+ }else{
+ log.debug("No value found for key '"+args[0]+"'");
+ return "Hello "+args[0]+"!";
+ }
+ }
+
+ @Override
+ public String getChaincodeID() {
+ return "hello";
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.out.println("Hello world! starting "+args);
+ log.info("starting");
+ new Example().start(args);
+ }
+
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/example/LinkExample.java b/core/chaincode/shim/java/src/main/java/example/LinkExample.java
new file mode 100644
index 00000000000..c9468b10987
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/example/LinkExample.java
@@ -0,0 +1,62 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package example;
+
+import org.hyperledger.java.shim.ChaincodeBase;
+import org.hyperledger.java.shim.ChaincodeStub;
+
+public class LinkExample extends ChaincodeBase {
+
+ //Default name for map chaincode in dev mode
+ //Can be set to a hash location via init or setMap
+ private String mapChaincode = "map";
+
+ @Override
+ public String run(ChaincodeStub stub, String function, String[] args) {
+ switch (function) {
+ case "init":
+ case "setMap":
+ mapChaincode = args[0];
+ break;
+ case "put":
+ stub.invokeChaincode(mapChaincode, function, args);
+ default:
+ break;
+ }
+ return null;
+ }
+
+ @Override
+ public String query(ChaincodeStub stub, String function, String[] args) {
+ String tmp = stub.queryChaincode("map", function, args);
+ if (tmp.isEmpty()) tmp = "NULL";
+ else tmp = "\"" + tmp + "\"";
+ tmp += " (queried from map chaincode)";
+ return tmp;
+ }
+
+ public static void main(String[] args) throws Exception {
+ new LinkExample().start(args);
+ //new Example().start();
+ }
+
+ @Override
+ public String getChaincodeID() {
+ return "link";
+ }
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/example/MapExample.java b/core/chaincode/shim/java/src/main/java/example/MapExample.java
new file mode 100644
index 00000000000..76ebacbbc7a
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/example/MapExample.java
@@ -0,0 +1,62 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package example;
+
+import org.hyperledger.java.shim.ChaincodeBase;
+import org.hyperledger.java.shim.ChaincodeStub;
+
+public class MapExample extends ChaincodeBase {
+
+ @Override
+ public String run(ChaincodeStub stub, String function, String[] args) {
+ switch (function) {
+ case "put":
+ for (int i = 0; i < args.length; i += 2)
+ stub.putState(args[i], args[i + 1]);
+ break;
+ case "del":
+ for (String arg : args)
+ stub.delState(arg);
+ break;
+ }
+ return null;
+ }
+
+ @Override
+ public String query(ChaincodeStub stub, String function, String[] args) {
+// if ("range".equals(function)) {
+// String build = "";
+// HashMap range = stub.rangeQueryState(args[0], args[1], 10);
+// for (String s : range.keySet()) {
+// build += s + ":" + range.get(s) + " ";
+// }
+// return build;
+// }
+ return stub.getState(args[0]);
+ }
+
+ @Override
+ public String getChaincodeID() {
+ return "map";
+ }
+
+ public static void main(String[] args) throws Exception {
+ new MapExample().start(args);
+ }
+
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/example/SimpleSample.java b/core/chaincode/shim/java/src/main/java/example/SimpleSample.java
new file mode 100644
index 00000000000..9fcb3245b90
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/example/SimpleSample.java
@@ -0,0 +1,161 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package example;
+
+import org.hyperledger.java.shim.ChaincodeBase;
+import org.hyperledger.java.shim.ChaincodeStub;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * Classic "transfer" sample chaincode
+ * (java implementation of chaincode_example02.go)
+ * @author Sergey Pomytkin spomytkin@gmail.com
+ *
+ */
+public class SimpleSample extends ChaincodeBase {
+ private static Log log = LogFactory.getLog(SimpleSample.class);
+
+ @Override
+ public String run(ChaincodeStub stub, String function, String[] args) {
+ log.info("In run, function:"+function);
+
+ switch (function) {
+ case "init":
+ init(stub, function, args);
+ break;
+ case "transfer":
+ String re = transfer(stub, args);
+ System.out.println(re);
+ return re;
+ case "put":
+ for (int i = 0; i < args.length; i += 2)
+ stub.putState(args[i], args[i + 1]);
+ break;
+ case "del":
+ for (String arg : args)
+ stub.delState(arg);
+ break;
+ default:
+ return transfer(stub, args);
+ }
+
+ return null;
+ }
+
+ private String transfer(ChaincodeStub stub, String[] args) {
+ System.out.println("in transfer");
+ if(args.length!=3){
+ System.out.println("Incorrect number of arguments:"+args.length);
+ return "{\"Error\":\"Incorrect number of arguments. Expecting 3: from, to, amount\"}";
+ }
+ String fromName =args[0];
+ String fromAm=stub.getState(fromName);
+ String toName =args[1];
+ String toAm=stub.getState(toName);
+ String am =args[2];
+ int valFrom=0;
+ if (fromAm!=null&&!fromAm.isEmpty()){
+ try{
+ valFrom = Integer.parseInt(fromAm);
+ }catch(NumberFormatException e ){
+ System.out.println("{\"Error\":\"Expecting integer value for asset holding of "+fromName+" \"}"+e);
+ return "{\"Error\":\"Expecting integer value for asset holding of "+fromName+" \"}";
+ }
+ }else{
+ return "{\"Error\":\"Failed to get state for " +fromName + "\"}";
+ }
+
+ int valTo=0;
+ if (toAm!=null&&!toAm.isEmpty()){
+ try{
+ valTo = Integer.parseInt(toAm);
+ }catch(NumberFormatException e ){
+ e.printStackTrace();
+ return "{\"Error\":\"Expecting integer value for asset holding of "+toName+" \"}";
+ }
+ }else{
+ return "{\"Error\":\"Failed to get state for " +toName + "\"}";
+ }
+
+ int valA =0;
+ try{
+ valA = Integer.parseInt(am);
+ }catch(NumberFormatException e ){
+ e.printStackTrace();
+ return "{\"Error\":\"Expecting integer value for amount \"}";
+ }
+ if(valA>valFrom)
+ return "{\"Error\":\"Insufficient asset holding value for requested transfer amount \"}";
+ valFrom = valFrom-valA;
+ valTo = valTo+valA;
+ System.out.println("Transfer "+fromName+">"+toName+" am='"+am+"' new values='"+valFrom+"','"+ valTo+"'");
+ stub.putState(fromName,""+ valFrom);
+ stub.putState(toName, ""+valTo);
+
+ System.out.println("Transfer complete");
+
+ return null;
+
+ }
+
+ public String init(ChaincodeStub stub, String function, String[] args) {
+ if(args.length!=4){
+ return "{\"Error\":\"Incorrect number of arguments. Expecting 4\"}";
+ }
+ try{
+ int valA = Integer.parseInt(args[1]);
+ int valB = Integer.parseInt(args[3]);
+ stub.putState(args[0], args[1]);
+ stub.putState(args[2], args[3]);
+ }catch(NumberFormatException e ){
+ return "{\"Error\":\"Expecting integer value for asset holding\"}";
+ }
+ return null;
+ }
+
+
+ @Override
+ public String query(ChaincodeStub stub, String function, String[] args) {
+ if(args.length!=1){
+ return "{\"Error\":\"Incorrect number of arguments. Expecting name of the person to query\"}";
+ }
+ String am =stub.getState(args[0]);
+ if (am!=null&&!am.isEmpty()){
+ try{
+ int valA = Integer.parseInt(am);
+ return "{\"Name\":\"" + args[0] + "\",\"Amount\":\"" + am + "\"}";
+ }catch(NumberFormatException e ){
+ return "{\"Error\":\"Expecting integer value for asset holding\"}";
+ } }else{
+ return "{\"Error\":\"Failed to get state for " + args[0] + "\"}";
+ }
+
+
+ }
+
+ @Override
+ public String getChaincodeID() {
+ return "SimpleSample";
+ }
+
+ public static void main(String[] args) throws Exception {
+ new SimpleSample().start(args);
+ }
+
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/CBDesc.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/CBDesc.java
new file mode 100644
index 00000000000..ac2f414763a
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/CBDesc.java
@@ -0,0 +1,37 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm;
+
+public class CBDesc {
+
+ public final CallbackType type;
+ public final String trigger;
+ public final Callback callback;
+
+ /**
+ *
+ * @param type
+ * @param trigger
+ * @param callback
+ */
+ public CBDesc(CallbackType type, String trigger, Callback callback) {
+ this.type = type;
+ this.trigger = trigger;
+ this.callback = callback;
+ }
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/Callback.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/Callback.java
new file mode 100644
index 00000000000..b0a5420bcf5
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/Callback.java
@@ -0,0 +1,23 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm;
+
+public interface Callback {
+
+ public void run(Event event);
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/CallbackKey.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/CallbackKey.java
new file mode 100644
index 00000000000..2c94d4d371b
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/CallbackKey.java
@@ -0,0 +1,58 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm;
+
+public class CallbackKey {
+
+ String target;
+ CallbackType type;
+
+ public CallbackKey(String target, CallbackType type) {
+ this.target = target;
+ this.type = type;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((target == null) ? 0 : target.hashCode());
+ result = prime * result + ((type == null) ? 0 : type.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ CallbackKey other = (CallbackKey) obj;
+ if (target == null) {
+ if (other.target != null)
+ return false;
+ } else if (!target.equals(other.target))
+ return false;
+ if (type != other.type)
+ return false;
+ return true;
+ }
+
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/CallbackType.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/CallbackType.java
new file mode 100644
index 00000000000..c4db630d51c
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/CallbackType.java
@@ -0,0 +1,27 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm;
+
+public enum CallbackType {
+
+ NONE,
+ BEFORE_EVENT,
+ LEAVE_STATE,
+ ENTER_STATE,
+ AFTER_EVENT;
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/Event.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/Event.java
new file mode 100644
index 00000000000..9bf8e17d748
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/Event.java
@@ -0,0 +1,78 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm;
+
+
+/** Holds the info that gets passed as a reference in the callbacks */
+public class Event {
+
+ // A reference to the parent FSM.
+ public final FSM fsm;
+ // The event name.
+ public final String name;
+ // The state before the transition.
+ public final String src;
+ // The state after the transition.
+ public final String dst;
+ // An optional error that can be returned from a callback.
+ public Exception error = null;
+
+ // An internal flag set if the transition is canceled.
+ public boolean cancelled = false;
+ // An internal flag set if the transition should be asynchronous
+ public boolean async;
+
+ // An optional list of arguments passed to the callback.
+ public final Object[] args;
+
+
+ public Event(FSM fsm, String name, String src, String dst,
+ Exception error, boolean cancelled, boolean async, Object... args) {
+ this.fsm = fsm;
+ this.name = name;
+ this.src = src;
+ this.dst = dst;
+ this.error = error;
+ this.cancelled = cancelled;
+ this.async = async;
+ this.args = args;
+ }
+
+ /**
+ * Can be called in before_<EVENT> or leave_<STATE> to cancel the
+ * current transition before it happens. It takes an optional error,
+ * which will overwrite the event's error if it had already been set.
+ */
+ public Exception cancel(Exception error) {
+ cancelled = true;
+ if (error != null) {
+ this.error = error;
+ }
+ return error;
+ }
+
+ /**
+ * Can be called in leave_<STATE> to do an asynchronous state transition.
+ * The current state transition will be on hold in the old state until a final
+ * call to Transition is made. This will complete the transition and possibly
+ * call the other callbacks.
+ */
+ public void async() {
+ async = true;
+ }
+
+}
\ No newline at end of file
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/EventDesc.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/EventDesc.java
new file mode 100644
index 00000000000..4d951a76d35
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/EventDesc.java
@@ -0,0 +1,25 @@
+package org.hyperledger.java.fsm;
+
+/**
+ * Represents an event when initializing the FSM.
+ * The event can have one or more source states that is valid for performing
+ * the transition. If the FSM is in one of the source states it will end up in
+ * the specified destination state, calling all defined callbacks as it goes.
+ */
+public class EventDesc {
+
+ /** The event name used when calling for a transition */
+ String name;
+
+ /** A slice of source states that the FSM must be in to perform a state transition */
+ String[] src;
+
+ /** The destination state that the FSM will be in if the transition succeeds */
+ String dst;
+
+ public EventDesc(String name, String dst, String... src) {
+ this.name = name;
+ this.src = src;
+ this.dst = dst;
+ }
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/EventKey.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/EventKey.java
new file mode 100644
index 00000000000..1b7cfd830ec
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/EventKey.java
@@ -0,0 +1,64 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm;
+
+/** Key for the transition map */
+public class EventKey {
+
+ /** The name of the event that the key refers to */
+ public final String event;
+
+ /** The source from where the event can transition */
+ public final String src;
+
+ public EventKey(String event, String src) {
+ this.event = event;
+ this.src = src;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((event == null) ? 0 : event.hashCode());
+ result = prime * result + ((src == null) ? 0 : src.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ EventKey other = (EventKey) obj;
+ if (event == null) {
+ if (other.event != null)
+ return false;
+ } else if (!event.equals(other.event))
+ return false;
+ if (src == null) {
+ if (other.src != null)
+ return false;
+ } else if (!src.equals(other.src))
+ return false;
+ return true;
+ }
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/FSM.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/FSM.java
new file mode 100644
index 00000000000..1eb8cedce78
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/FSM.java
@@ -0,0 +1,231 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm;
+
+import java.util.HashMap;
+import java.util.HashSet;
+
+import org.hyperledger.java.fsm.exceptions.AsyncException;
+import org.hyperledger.java.fsm.exceptions.CancelledException;
+import org.hyperledger.java.fsm.exceptions.InTrasistionException;
+import org.hyperledger.java.fsm.exceptions.InvalidEventException;
+import org.hyperledger.java.fsm.exceptions.NoTransitionException;
+import org.hyperledger.java.fsm.exceptions.NotInTransitionException;
+import org.hyperledger.java.fsm.exceptions.UnknownEventException;
+
+public class FSM {
+
+ /** The current state of the FSM */
+ private String current;
+
+ /** Maps events and sources states to destination states */
+ private final HashMap<EventKey, String> transitions;
+
+ /** Maps events and triggers to callback functions */
+ private final HashMap<CallbackKey, Callback> callbacks;
+
+ /** The internal transaction function used either directly
+ * or when transition is called in an asynchronous state transition. */
+ protected Runnable transition;
+
+ /** Calls the FSM's transition function */
+ private Transitioner transitioner;
+
+ private final HashSet<String> allStates;
+ private final HashSet<String> allEvents;
+
+
+ // NewFSM constructs a FSM from events and callbacks.
+ //
+ // The events and transitions are specified as a slice of Event structs
+ // specified as Events. Each Event is mapped to one or more internal
+ // transitions from Event.Src to Event.Dst.
+ //
+ // Callbacks are added as a map specified as Callbacks where the key is parsed
+ // as the callback event as follows, and called in the same order:
+ //
+ // 1. before_<EVENT> - called before event named <EVENT>
+ //
+ // 2. before_event - called before all events
+ //
+ // 3. leave_<OLD_STATE> - called before leaving <OLD_STATE>
+ //
+ // 4. leave_state - called before leaving all states
+ //
+ // 5. enter_<NEW_STATE> - called after entering <NEW_STATE>
+ //
+ // 6. enter_state - called after entering all states
+ //
+ // 7. after_<EVENT> - called after event named <EVENT>
+ //
+ // 8. after_event - called after all events
+ //
+ // There are also two short form versions for the most commonly used callbacks.
+ // They are simply the name of the event or state:
+ //
+ // 1. <NEW_STATE> - called after entering <NEW_STATE>
+ //
+ // 2. <EVENT> - called after event named <EVENT>
+ //
+
+ public FSM(String initialState) {
+ current = initialState;
+ transitioner = new Transitioner();
+
+ transitions = new HashMap<>();
+ callbacks = new HashMap<>();
+
+ allEvents = new HashSet<>();
+ allStates = new HashSet<>();
+ }
+
+ /** Returns the current state of the FSM */
+ public String current() {
+ return current;
+ }
+
+ /** Returns whether or not the given state is the current state */
+ public boolean isCurrentState(String state) {
+ return state.equals(current);
+ }
+
+ /** Returns whether or not the given event can occur in the current state */
+ public boolean eventCanOccur(String eventName) {
+ return transitions.containsKey(new EventKey(eventName, current));
+ }
+
+ /** Returns whether or not the given event is blocked in the current state */
+ public boolean eventCannotOccur(String eventName) {
+ return !eventCanOccur(eventName);
+ }
+
+ /** Initiates a state transition with the named event.
+ * The call takes a variable number of arguments
+ * that will be passed to the callback, if defined.
+ *
+ * It returns normally if the state change is ok, or throws one of these errors:
+ * - event X inappropriate because previous transition did not complete
+ * - event X inappropriate in current state Y
+ * - event X does not exist
+ * - internal error on state transition
+ * @throws InTrasistionException
+ * @throws InvalidEventException
+ * @throws UnknownEventException
+ * @throws NoTransitionException
+ * @throws AsyncException
+ * @throws CancelledException
+ * @throws NotInTransitionException
+ */
+ public void raiseEvent(String eventName, Object... args)
+ throws InTrasistionException, InvalidEventException,
+ UnknownEventException, NoTransitionException, CancelledException,
+ AsyncException, NotInTransitionException {
+
+ if (transition != null) throw new InTrasistionException(eventName);
+
+ String dst = transitions.get(new EventKey(eventName, current));
+ if (dst == null) {
+ for (EventKey key : transitions.keySet()) {
+ if (key.event.equals(eventName)) {
+ throw new InvalidEventException(eventName, current);
+ }
+ }
+ throw new UnknownEventException(eventName);
+ }
+
+ Event event = new Event(this, eventName, current, dst, null, false, false, args);
+ callCallbacks(event, CallbackType.BEFORE_EVENT);
+
+ if (current.equals(dst)) {
+ callCallbacks(event, CallbackType.AFTER_EVENT);
+ throw new NoTransitionException(event.error);
+ }
+
+ // Setup the transition, call it later.
+ transition = () -> {
+ current = dst;
+ try {
+ callCallbacks(event, CallbackType.ENTER_STATE);
+ callCallbacks(event, CallbackType.AFTER_EVENT);
+ } catch (Exception e) {
+ throw new InternalError(e);
+ }
+ };
+
+ callCallbacks(event, CallbackType.LEAVE_STATE);
+
+ // Perform the rest of the transition, if not asynchronous.
+ transition();
+ }
+
+ // Transition wraps transitioner.transition.
+ public void transition() throws NotInTransitionException {
+ transitioner.transition(this);
+ }
+
+
+ /** Calls the callbacks of type 'type'; first the named then the general version.
+ * @throws CancelledException
+ * @throws AsyncException */
+ public void callCallbacks(Event event, CallbackType type) throws CancelledException, AsyncException {
+ String trigger = event.name;
+ if (type == CallbackType.LEAVE_STATE) trigger = event.src;
+ else if (type == CallbackType.ENTER_STATE) trigger = event.dst;
+
+ Callback[] callbacks = new Callback[] {
+ this.callbacks.get(new CallbackKey(trigger, type)), //Primary
+ this.callbacks.get(new CallbackKey("", type)), //General
+ };
+
+ for (Callback callback : callbacks) {
+ if (callback != null) {
+ callback.run(event);
+ if (type == CallbackType.LEAVE_STATE) {
+ if (event.cancelled) {
+ transition = null;
+ throw new CancelledException(event.error);
+ } else if (event.async) {
+ throw new AsyncException(event.error);
+ }
+ } else if (type == CallbackType.BEFORE_EVENT) {
+ if (event.cancelled) {
+ throw new CancelledException(event.error);
+ }
+ }
+ }
+ }
+ }
+
+ public void addEvents(EventDesc... events) {
+ // Build transition map and store sets of all events and states.
+ for (EventDesc event : events) {
+ for (String src : event.src) {
+ transitions.put(new EventKey(event.name, src), event.dst);
+ allStates.add(src);
+ }
+ allStates.add(event.dst);
+ allEvents.add(event.name);
+ }
+ }
+
+
+ public void addCallbacks(CBDesc... descs) {
+ for (CBDesc desc : descs) {
+ callbacks.put(new CallbackKey(desc.trigger, desc.type), desc.callback);
+ }
+ }
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/Transitioner.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/Transitioner.java
new file mode 100644
index 00000000000..45a06382f62
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/Transitioner.java
@@ -0,0 +1,31 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm;
+
+import org.hyperledger.java.fsm.exceptions.NotInTransitionException;
+
+public class Transitioner {
+
+ public void transition(FSM fsm) throws NotInTransitionException {
+ if (fsm.transition == null) {
+ throw new NotInTransitionException();
+ }
+ fsm.transition.run();
+ fsm.transition = null;
+ }
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/AsyncException.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/AsyncException.java
new file mode 100644
index 00000000000..df7d9d8b96f
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/AsyncException.java
@@ -0,0 +1,33 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm.exceptions;
+
+public class AsyncException extends Exception {
+
+ public final Exception error;
+
+ public AsyncException() {
+ this(null);
+ }
+
+ public AsyncException(Exception error) {
+ super("Async started" + (error == null ?
+ "" : " with error " + error.toString()));
+ this.error = error;
+ }
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/CancelledException.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/CancelledException.java
new file mode 100644
index 00000000000..f7b6c6142d0
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/CancelledException.java
@@ -0,0 +1,33 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm.exceptions;
+
+public class CancelledException extends Exception {
+
+ public final Exception error;
+
+ public CancelledException() {
+ this(null);
+ }
+
+ public CancelledException(Exception error) {
+ super("The transition was cancelled" + (error == null ?
+ "" : " with error " + error.toString()));
+ this.error = error;
+ }
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/InTrasistionException.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/InTrasistionException.java
new file mode 100644
index 00000000000..fd5bee0edeb
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/InTrasistionException.java
@@ -0,0 +1,29 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm.exceptions;
+
+public class InTrasistionException extends Exception {
+
+ public final String event;
+
+ public InTrasistionException(String event) {
+ super("Event '" + event + "' is inappropriate because"
+ + " the previous transaction had not completed");
+ this.event = event;
+ }
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/InvalidEventException.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/InvalidEventException.java
new file mode 100644
index 00000000000..dd1e3a6b273
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/InvalidEventException.java
@@ -0,0 +1,31 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm.exceptions;
+
+public class InvalidEventException extends Exception {
+
+ public final String event;
+ public final String state;
+
+ public InvalidEventException(String event, String state) {
+ super("Event '" + event + "' is inappropriate"
+ + " given the current state, " + state);
+ this.event = event;
+ this.state = state;
+ }
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/NoTransitionException.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/NoTransitionException.java
new file mode 100644
index 00000000000..485237d1f09
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/NoTransitionException.java
@@ -0,0 +1,32 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm.exceptions;
+
+public class NoTransitionException extends Exception {
+
+ public final Exception error;
+
+ public NoTransitionException() {
+ this(null);
+ }
+
+ public NoTransitionException(Exception error) {
+ super("No transition occurred" + (error == null ? "" : " because of error " + error.toString()));
+ this.error = error;
+ }
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/NotInTransitionException.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/NotInTransitionException.java
new file mode 100644
index 00000000000..7c9ed47559b
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/NotInTransitionException.java
@@ -0,0 +1,26 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm.exceptions;
+
+public class NotInTransitionException extends Exception {
+
+ public NotInTransitionException() {
+ super("The transition is inappropriate"
+ + " because there is no state change in progress");
+ }
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/UnknownEventException.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/UnknownEventException.java
new file mode 100644
index 00000000000..f3bb32b1976
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/fsm/exceptions/UnknownEventException.java
@@ -0,0 +1,28 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.fsm.exceptions;
+
+public class UnknownEventException extends Exception {
+
+ public final String event;
+
+ public UnknownEventException(String event) {
+ super("Event '" + event + "' does not exist");
+ this.event = event;
+ }
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/helper/Channel.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/helper/Channel.java
new file mode 100644
index 00000000000..58384147bdd
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/helper/Channel.java
@@ -0,0 +1,66 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.helper;
+
+import java.io.Closeable;
+import java.util.HashSet;
+import java.util.concurrent.LinkedBlockingQueue;
+
+public class Channel<E> extends LinkedBlockingQueue<E> implements Closeable {
+
+ private boolean closed = false;
+
+ private HashSet<Thread> waiting = new HashSet<>();
+
+ //TODO add other methods to secure closing behavior
+
+ @Override
+ public E take() throws InterruptedException {
+ synchronized (waiting) {
+ if (closed) throw new InterruptedException("Channel closed");
+ waiting.add(Thread.currentThread());
+ }
+ E e = super.take();
+ synchronized (waiting) {
+ waiting.remove(Thread.currentThread());
+ }
+ return e;
+ }
+
+
+ @Override
+ public boolean add(E e) {
+ if (closed) {
+ throw new IllegalStateException("Channel is closed");
+ }
+ return super.add(e);
+ }
+
+
+ @Override
+ public void close() {
+ synchronized (waiting) {
+ closed = true;
+ for (Thread t : waiting) {
+ t.interrupt();
+ }
+ waiting.clear();
+ clear();
+ }
+ }
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/shim/ChaincodeBase.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/shim/ChaincodeBase.java
new file mode 100644
index 00000000000..9f085fe531d
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/shim/ChaincodeBase.java
@@ -0,0 +1,224 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.shim;
+
+import java.io.File;
+
+import javax.net.ssl.SSLException;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Options;
+
+import com.google.protobuf.ByteString;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import io.grpc.ManagedChannel;
+import io.grpc.netty.GrpcSslContexts;
+import io.grpc.netty.NegotiationType;
+import io.grpc.netty.NettyChannelBuilder;
+import io.grpc.stub.StreamObserver;
+import io.netty.handler.ssl.SslContext;
+import protos.Chaincode.ChaincodeID;
+import protos.Chaincode.ChaincodeMessage;
+import protos.Chaincode.ChaincodeMessage.Type;
+import protos.ChaincodeSupportGrpc;
+import protos.ChaincodeSupportGrpc.ChaincodeSupportStub;
+
+public abstract class ChaincodeBase {
+
+ private static Log logger = LogFactory.getLog(ChaincodeBase.class);
+
+ public abstract String run(ChaincodeStub stub, String function, String[] args);
+ public abstract String query(ChaincodeStub stub, String function, String[] args);
+ public abstract String getChaincodeID();
+
+ public static final String DEFAULT_HOST = "127.0.0.1";
+ public static final int DEFAULT_PORT = 30303;
+
+ private String host = DEFAULT_HOST;
+ private int port = DEFAULT_PORT;
+
+ private Handler handler;
+ private String id = getChaincodeID();
+
+ // Start entry point for chaincodes bootstrap.
+ public void start(String[] args) {
+ Options options = new Options();
+ options.addOption("a", "peerAddress", true, "Address of peer to connect to");
+ options.addOption("s", "securityEnabled", false, "Present if security is enabled");
+ options.addOption("i", "id", true, "Identity of chaincode");
+
+ try {
+ CommandLine cl = new DefaultParser().parse(options, args);
+ if (cl.hasOption('a')) {
+ host = cl.getOptionValue('a');
+ port = new Integer(host.split(":")[1]);
+ host = host.split(":")[0];
+ }
+ if (cl.hasOption('s')) {
+ //TODO
+ logger.warn("securityEnabled option not implemented yet");
+ }
+ if (cl.hasOption('i')) {
+ id = cl.getOptionValue('i');
+ }
+ } catch (Exception e) {
+ logger.warn("cli parsing failed with exception",e);
+
+ }
+
+ Runnable chaincode = () -> {
+ logger.trace("chaincode started");
+ ManagedChannel connection = newPeerClientConnection();
+ logger.trace("connection created");
+ chatWithPeer(connection);
+ logger.trace("chatWithPeer DONE");
+ };
+ new Thread(chaincode).start();
+ }
+
+ public ManagedChannel newPeerClientConnection() {
+ NettyChannelBuilder builder = NettyChannelBuilder.forAddress(host, port);
+ //TODO security
+ if (false) {//"true".equals(params.get("peer.tls.enabled"))) {
+ try {
+ SslContext sslContext = GrpcSslContexts.forClient().trustManager(
+ new File("pathToServerCertPemFile")).keyManager(new File("pathToOwnCertPemFile"),
+ new File("pathToOwnPrivateKeyPemFile")).build();
+ builder.negotiationType(NegotiationType.TLS);
+ builder.sslContext(sslContext);
+ } catch (SSLException e) {
+ logger.error("failed connect to peer with SSLException",e);
+ }
+ } else {
+ builder.usePlaintext(true);
+ }
+
+ return builder.build();
+ }
+
+ public void chatWithPeer(ManagedChannel connection) {
+ // Establish stream with validating peer
+ ChaincodeSupportStub stub = ChaincodeSupportGrpc.newStub(connection);
+
+ StreamObserver<ChaincodeMessage> requestObserver = null;
+ try {
+ requestObserver = stub.register(
+ new StreamObserver<ChaincodeMessage>() {
+
+ @Override
+ public void onNext(ChaincodeMessage message) {
+ try {
+ logger.debug(String.format("[%s]Received message %s from org.hyperledger.java.shim",
+ Handler.shortUUID(message.getUuid()), message.getType()));
+ handler.handleMessage(message);
+ } catch (Exception e) {
+ e.printStackTrace();
+ System.exit(-1);
+ //TODO
+ // } else if (err != nil) {
+ // logger.Error(fmt.Sprintf("Received error from server: %s, ending chaincode stream", err))
+ // return
+ // } else if (in == nil) {
+ // err = fmt.Errorf("Received nil message, ending chaincode stream")
+ // logger.debug("Received nil message, ending chaincode stream")
+ // return
+ }
+ }
+
+ @Override
+ public void onError(Throwable e) {
+ logger.error("Unable to connect to peer server: "+ e.getMessage());
+ System.exit(-1);
+ }
+
+ @Override
+ public void onCompleted() {
+ connection.shutdown();
+ handler.nextState.close();
+ }
+ });
+ } catch (Exception e) {
+ logger.error("Unable to connect to peer server");
+ System.exit(-1);
+ }
+
+ // Create the org.hyperledger.java.shim handler responsible for all control logic
+ handler = new Handler(requestObserver, this);
+
+ // Send the ChaincodeID during register.
+ ChaincodeID chaincodeID = ChaincodeID.newBuilder()
+ .setName(id)//TODO params.get("chaincode.id.name"))
+ .build();
+
+ ChaincodeMessage payload = ChaincodeMessage.newBuilder()
+ .setPayload(chaincodeID.toByteString())
+ .setType(Type.REGISTER)
+ .build();
+
+ // Register on the stream
+ logger.debug(String.format("Registering as '%s' ... sending %s", id, Type.REGISTER));
+ handler.serialSend(payload);
+
+ while (true) {
+ try {
+ NextStateInfo nsInfo = handler.nextState.take();
+ ChaincodeMessage message = nsInfo.message;
+ handler.handleMessage(message);
+ //keepalive messages are PONGs to the fabric's PINGs
+ if (nsInfo.sendToCC || message.getType() == Type.KEEPALIVE) {
+ if (message.getType() == Type.KEEPALIVE){
+ logger.debug("Sending KEEPALIVE response");
+ }else {
+ logger.debug("[" + Handler.shortUUID(message.getUuid()) + "]Send state message " + message.getType());
+ }
+ handler.serialSend(message);
+ }
+ } catch (Exception e) {
+ break;
+ }
+ }
+ }
+
+ public ByteString runRaw(ChaincodeStub stub, String function, String[] args) {
+ return null;
+ }
+
+ public ByteString queryRaw(ChaincodeStub stub, String function, String[] args) {
+ return null;
+ }
+
+ protected ByteString runHelper(ChaincodeStub stub, String function, String[] args) {
+ ByteString ret = runRaw(stub, function, args);
+ if (ret == null) {
+ String tmp = run(stub, function, args);
+ ret = ByteString.copyFromUtf8(tmp == null ? "" : tmp);
+ }
+ return ret;
+ }
+
+ protected ByteString queryHelper(ChaincodeStub stub, String function, String[] args) {
+ ByteString ret = queryRaw(stub, function, args);
+ if (ret == null) {
+ ret = ByteString.copyFromUtf8(query(stub, function, args));
+ }
+ return ret;
+ }
+}
\ No newline at end of file
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/shim/ChaincodeStub.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/shim/ChaincodeStub.java
new file mode 100644
index 00000000000..3704da4f8c4
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/shim/ChaincodeStub.java
@@ -0,0 +1,577 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.shim;
+
+import com.google.protobuf.ByteString;
+
+public class ChaincodeStub {
+
+ private final String uuid;
+ private final Handler handler;
+
+ public ChaincodeStub(String uuid, Handler handler) {
+ this.uuid = uuid;
+ this.handler = handler;
+ }
+
+ /**
+ * Gets the UUID of this stub
+ * @return the id used to identify this communication channel
+ */
+ public String getUuid() {
+ return uuid;
+ }
+
+ /**
+ * Get the state of the provided key from the ledger, and returns it as a string
+ * @param key the key of the desired state
+ * @return the String value of the requested state
+ */
+ public String getState(String key) {
+ return handler.handleGetState(key, uuid).toStringUtf8();
+ }
+
+ /**
+ * Puts the given state into a ledger, automatically wrapping it in a ByteString
+ * @param key reference key
+ * @param value value to be put
+ */
+ public void putState(String key, String value) {
+ handler.handlePutState(key, ByteString.copyFromUtf8(value), uuid);
+ }
+
+ /**
+ * Deletes the state of the given key from the ledger
+ * @param key key of the state to be deleted
+ */
+ public void delState(String key) {
+ handler.handleDeleteState(key, uuid);
+ }
+
+ /**
+ *
+ * @param startKey
+ * @param endKey
+ * @param limit
+ * @return
+ */
+// public HashMap rangeQueryState(String startKey, String endKey, int limit) {
+// HashMap map = new HashMap<>();
+// for (RangeQueryStateKeyValue mapping : handler.handleRangeQueryState(
+// startKey, endKey, limit, uuid).getKeysAndValuesList()) {
+// map.put(mapping.getKey(), mapping.getValue().toStringUtf8());
+// }
+// return map;
+// }
+
+ /**
+ *
+ * @param chaincodeName
+ * @param function
+ * @param args
+ * @return
+ */
+ public String invokeChaincode(String chaincodeName, String function, String[] args) {
+ return handler.handleInvokeChaincode(chaincodeName, function, args, uuid).toStringUtf8();
+ }
+
+ /**
+ *
+ * @param chaincodeName
+ * @param function
+ * @param args
+ * @return
+ */
+ public String queryChaincode(String chaincodeName, String function, String[] args) {
+ return handler.handleQueryChaincode(chaincodeName, function, args, uuid).toStringUtf8();
+ }
+
+ //------RAW CALLS------
+
+ /**
+ *
+ * @param key
+ * @return
+ */
+ public ByteString getRawState(String key) {
+ return handler.handleGetState(key, uuid);
+ }
+
+ /**
+ *
+ * @param key
+ * @param value
+ */
+ public void putRawState(String key, ByteString value) {
+ handler.handlePutState(key, value, uuid);
+ }
+
+ /**
+ *
+ * @param startKey
+ * @param endKey
+ * @param limit
+ * @return
+ */
+// public RangeQueryStateResponse rangeQueryRawState(String startKey, String endKey, int limit) {
+// return handler.handleRangeQueryState(startKey, endKey, limit, uuid);
+// }
+
+ /**
+ *
+ * @param chaincodeName
+ * @param function
+ * @param args
+ * @return
+ */
+ public ByteString queryRawChaincode(String chaincodeName, String function, String[] args) {
+ return handler.handleQueryChaincode(chaincodeName, function, args, uuid);
+ }
+
+ /**
+ * Invokes the provided chaincode with the given function and arguments, and returns the
+ * raw ByteString value that invocation generated.
+ * @param chaincodeName The name of the chaincode to invoke
+ * @param function the function parameter to pass to the chaincode
+ * @param args the arguments to be provided in the chaincode call
+ * @return the value returned by the chaincode call
+ */
+ public ByteString invokeRawChaincode(String chaincodeName, String function, String[] args) {
+ return handler.handleInvokeChaincode(chaincodeName, function, args, uuid);
+ }
+
+
+// //TODO Table calls
+ public void createTable(String name) {
+
+// if (getTable(name) != null)
+// throw new RuntimeException("CreateTable operation failed. Table %s already exists.");
+
+// if err != ErrTableNotFound {
+// return fmt.Errorf("CreateTable operation failed. %s", err)
+// }
+//
+// if columnDefinitions == nil || len(columnDefinitions) == 0 {
+// return errors.New("Invalid column definitions. Tables must contain at least one column.")
+// }
+//
+// hasKey := false
+// nameMap := make(map[string]bool)
+// for i, definition := range columnDefinitions {
+//
+// // Check name
+// if definition == nil {
+// return fmt.Errorf("Column definition %d is invalid. Definition must not be nil.", i)
+// }
+// if len(definition.Name) == 0 {
+// return fmt.Errorf("Column definition %d is invalid. Name must be 1 or more characters.", i)
+// }
+// if _, exists := nameMap[definition.Name]; exists {
+// return fmt.Errorf("Invalid table. Table contains duplicate column name '%s'.", definition.Name)
+// }
+// nameMap[definition.Name] = true
+//
+// // Check type
+// switch definition.Type {
+// case ColumnDefinition_STRING:
+// case ColumnDefinition_INT32:
+// case ColumnDefinition_INT64:
+// case ColumnDefinition_UINT32:
+// case ColumnDefinition_UINT64:
+// case ColumnDefinition_BYTES:
+// case ColumnDefinition_BOOL:
+// default:
+// return fmt.Errorf("Column definition %s does not have a valid type.", definition.Name)
+// }
+//
+// if definition.Key {
+// hasKey = true
+// }
+// }
+//
+// if !hasKey {
+// return errors.New("Invalid table. One or more columns must be a key.")
+// }
+//
+// table := &Table{name, columnDefinitions}
+// tableBytes, err := proto.Marshal(table)
+// if err != nil {
+// return fmt.Errorf("Error marshalling table: %s", err)
+// }
+// tableNameKey, err := getTableNameKey(name)
+// if err != nil {
+// return fmt.Errorf("Error creating table key: %s", err)
+// }
+// try {
+// stub.PutState(tableNameKey, tableBytes)
+// } catch (Exception e) {
+// throw new RuntimeException("Error inserting table in state: " + e.getMessage());
+// }
+// return;
+ }
+//
+// // GetTable returns the table for the specified table name or ErrTableNotFound
+// // if the table does not exist.
+// func (stub *ChaincodeStub) GetTable(tableName string) (*Table, error) {
+// return stub.getTable(tableName)
+// }
+//
+// // DeleteTable deletes an entire table and all associated rows
+// func (stub *ChaincodeStub) DeleteTable(tableName string) error {
+// tableNameKey, err := getTableNameKey(tableName)
+// if err != nil {
+// return err
+// }
+//
+// // Delete rows
+// iter, err := stub.RangeQueryState(tableNameKey+"1", tableNameKey+":")
+// if err != nil {
+// return fmt.Errorf("Error deleting table: %s", err)
+// }
+// defer iter.Close()
+// for iter.HasNext() {
+// key, _, err := iter.Next()
+// if err != nil {
+// return fmt.Errorf("Error deleting table: %s", err)
+// }
+// err = stub.DelState(key)
+// if err != nil {
+// return fmt.Errorf("Error deleting table: %s", err)
+// }
+// }
+//
+// return stub.DelState(tableNameKey)
+// }
+//
+// // InsertRow inserts a new row into the specified table.
+// // Returns -
+// // true and no error if the row is successfully inserted.
+// // false and no error if a row already exists for the given key.
+// // false and a TableNotFoundError if the specified table name does not exist.
+// // false and an error if there is an unexpected error condition.
+// func (stub *ChaincodeStub) InsertRow(tableName string, row Row) (bool, error) {
+// return stub.insertRowInternal(tableName, row, false)
+// }
+//
+// // ReplaceRow updates the row in the specified table.
+// // Returns -
+// // true and no error if the row is successfully updated.
+// // false and no error if a row does not exist for the given key.
+// // false and a TableNotFoundError if the specified table name does not exist.
+// // false and an error if there is an unexpected error condition.
+// func (stub *ChaincodeStub) ReplaceRow(tableName string, row Row) (bool, error) {
+// return stub.insertRowInternal(tableName, row, true)
+// }
+//
+// // GetRow fetches a row from the specified table for the given key.
+// func (stub *ChaincodeStub) GetRow(tableName string, key []Column) (Row, error) {
+//
+// var row Row
+//
+// keyString, err := buildKeyString(tableName, key)
+// if err != nil {
+// return row, err
+// }
+//
+// rowBytes, err := stub.GetState(keyString)
+// if err != nil {
+// return row, fmt.Errorf("Error fetching row from DB: %s", err)
+// }
+//
+// err = proto.Unmarshal(rowBytes, &row)
+// if err != nil {
+// return row, fmt.Errorf("Error unmarshalling row: %s", err)
+// }
+//
+// return row, nil
+//
+// }
+//
+// // GetRows returns multiple rows based on a partial key. For example, given table
+// // | A | B | C | D |
+// // where A, C and D are keys, GetRows can be called with [A, C] to return
+// // all rows that have A, C and any value for D as their key. GetRows could
+// // also be called with A only to return all rows that have A and any value
+// // for C and D as their key.
+// func (stub *ChaincodeStub) GetRows(tableName string, key []Column) (<-chan Row, error) {
+//
+// keyString, err := buildKeyString(tableName, key)
+// if err != nil {
+// return nil, err
+// }
+//
+// iter, err := stub.RangeQueryState(keyString+"1", keyString+":")
+// if err != nil {
+// return nil, fmt.Errorf("Error fetching rows: %s", err)
+// }
+// defer iter.Close()
+//
+// rows := make(chan Row)
+//
+// go func() {
+// for iter.HasNext() {
+// _, rowBytes, err := iter.Next()
+// if err != nil {
+// close(rows)
+// }
+//
+// var row Row
+// err = proto.Unmarshal(rowBytes, &row)
+// if err != nil {
+// close(rows)
+// }
+//
+// rows <- row
+//
+// }
+// close(rows)
+// }()
+//
+// return rows, nil
+//
+// }
+//
+// // DeleteRow deletes the row for the given key from the specified table.
+// func (stub *ChaincodeStub) DeleteRow(tableName string, key []Column) error {
+//
+// keyString, err := buildKeyString(tableName, key)
+// if err != nil {
+// return err
+// }
+//
+// err = stub.DelState(keyString)
+// if err != nil {
+// return fmt.Errorf("DeleteRow operation error. Error deleting row: %s", err)
+// }
+//
+// return nil
+// }
+//
+// // VerifySignature ...
+// func (stub *ChaincodeStub) VerifySignature(certificate, signature, message []byte) (bool, error) {
+// // Instantiate a new SignatureVerifier
+// sv := ecdsa.NewX509ECDSASignatureVerifier()
+//
+// // Verify the signature
+// return sv.Verify(certificate, signature, message)
+// }
+//
+// // GetCallerCertificate returns caller certificate
+// func (stub *ChaincodeStub) GetCallerCertificate() ([]byte, error) {
+// return stub.securityContext.CallerCert, nil
+// }
+//
+// // GetCallerMetadata returns caller metadata
+// func (stub *ChaincodeStub) GetCallerMetadata() ([]byte, error) {
+// return stub.securityContext.Metadata, nil
+// }
+//
+// // GetBinding returns tx binding
+// func (stub *ChaincodeStub) GetBinding() ([]byte, error) {
+// return stub.securityContext.Binding, nil
+// }
+//
+// // GetPayload returns tx payload
+// func (stub *ChaincodeStub) GetPayload() ([]byte, error) {
+// return stub.securityContext.Payload, nil
+// }
+//
+// func (stub *ChaincodeStub) getTable(tableName string) (*Table, error) {
+//
+// tableName, err := getTableNameKey(tableName)
+// if err != nil {
+// return nil, err
+// }
+//
+// tableBytes, err := stub.GetState(tableName)
+// if tableBytes == nil {
+// return nil, ErrTableNotFound
+// }
+// if err != nil {
+// return nil, fmt.Errorf("Error fetching table: %s", err)
+// }
+// table := &Table{}
+// err = proto.Unmarshal(tableBytes, table)
+// if err != nil {
+// return nil, fmt.Errorf("Error unmarshalling table: %s", err)
+// }
+//
+// return table, nil
+// }
+//
+// func validateTableName(name string) error {
+// if len(name) == 0 {
+// return errors.New("Invalid table name. Table name must be 1 or more characters.")
+// }
+//
+// return nil
+// }
+//
+// func getTableNameKey(name string) (string, error) {
+// err := validateTableName(name)
+// if err != nil {
+// return "", err
+// }
+//
+// return strconv.Itoa(len(name)) + name, nil
+// }
+//
+// func buildKeyString(tableName string, keys []Column) (string, error) {
+//
+// var keyBuffer bytes.Buffer
+//
+// tableNameKey, err := getTableNameKey(tableName)
+// if err != nil {
+// return "", err
+// }
+//
+// keyBuffer.WriteString(tableNameKey)
+//
+// for _, key := range keys {
+//
+// var keyString string
+// switch key.Value.(type) {
+// case *Column_String_:
+// keyString = key.GetString_()
+// case *Column_Int32:
+// // b := make([]byte, 4)
+// // binary.LittleEndian.PutUint32(b, uint32(key.GetInt32()))
+// // keyBuffer.Write(b)
+// keyString = strconv.FormatInt(int64(key.GetInt32()), 10)
+// case *Column_Int64:
+// keyString = strconv.FormatInt(key.GetInt64(), 10)
+// case *Column_Uint32:
+// keyString = strconv.FormatUint(uint64(key.GetInt32()), 10)
+// case *Column_Uint64:
+// keyString = strconv.FormatUint(key.GetUint64(), 10)
+// case *Column_Bytes:
+// keyString = string(key.GetBytes())
+// case *Column_Bool:
+// keyString = strconv.FormatBool(key.GetBool())
+// }
+//
+// keyBuffer.WriteString(strconv.Itoa(len(keyString)))
+// keyBuffer.WriteString(keyString)
+// }
+//
+// return keyBuffer.String(), nil
+// }
+//
+// func getKeyAndVerifyRow(table Table, row Row) ([]Column, error) {
+//
+// var keys []Column
+//
+// if row.Columns == nil || len(row.Columns) != len(table.ColumnDefinitions) {
+// return keys, fmt.Errorf("Table '%s' defines %d columns, but row has %d columns.",
+// table.Name, len(table.ColumnDefinitions), len(row.Columns))
+// }
+//
+// for i, column := range row.Columns {
+//
+// // Check types
+// var expectedType bool
+// switch column.Value.(type) {
+// case *Column_String_:
+// expectedType = table.ColumnDefinitions[i].Type == ColumnDefinition_STRING
+// case *Column_Int32:
+// expectedType = table.ColumnDefinitions[i].Type == ColumnDefinition_INT32
+// case *Column_Int64:
+// expectedType = table.ColumnDefinitions[i].Type == ColumnDefinition_INT64
+// case *Column_Uint32:
+// expectedType = table.ColumnDefinitions[i].Type == ColumnDefinition_UINT32
+// case *Column_Uint64:
+// expectedType = table.ColumnDefinitions[i].Type == ColumnDefinition_UINT64
+// case *Column_Bytes:
+// expectedType = table.ColumnDefinitions[i].Type == ColumnDefinition_BYTES
+// case *Column_Bool:
+// expectedType = table.ColumnDefinitions[i].Type == ColumnDefinition_BOOL
+// default:
+// expectedType = false
+// }
+// if !expectedType {
+// return keys, fmt.Errorf("The type for table '%s', column '%s' is '%s', but the column in the row does not match.",
+// table.Name, table.ColumnDefinitions[i].Name, table.ColumnDefinitions[i].Type)
+// }
+//
+// if table.ColumnDefinitions[i].Key {
+// keys = append(keys, *column)
+// }
+//
+// }
+//
+// return keys, nil
+// }
+//
+// func (stub *ChaincodeStub) isRowPrsent(tableName string, key []Column) (bool, error) {
+// keyString, err := buildKeyString(tableName, key)
+// if err != nil {
+// return false, err
+// }
+// rowBytes, err := stub.GetState(keyString)
+// if err != nil {
+// return false, fmt.Errorf("Error fetching row for key %s: %s", keyString, err)
+// }
+// if rowBytes != nil {
+// return true, nil
+// }
+// return false, nil
+// }
+//
+// // insertRowInternal inserts a new row into the specified table.
+// // Returns -
+// // true and no error if the row is successfully inserted.
+// // false and no error if a row already exists for the given key.
+// // false and a TableNotFoundError if the specified table name does not exist.
+// // false and an error if there is an unexpected error condition.
+// func (stub *ChaincodeStub) insertRowInternal(tableName string, row Row, update bool) (bool, error) {
+//
+// table, err := stub.getTable(tableName)
+// if err != nil {
+// return false, err
+// }
+//
+// key, err := getKeyAndVerifyRow(*table, row)
+// if err != nil {
+// return false, err
+// }
+//
+// present, err := stub.isRowPrsent(tableName, key)
+// if err != nil {
+// return false, err
+// }
+// if (present && !update) || (!present && update) {
+// return false, nil
+// }
+//
+// rowBytes, err := proto.Marshal(&row)
+// if err != nil {
+// return false, fmt.Errorf("Error marshalling row: %s", err)
+// }
+//
+// keyString, err := buildKeyString(tableName, key)
+// if err != nil {
+// return false, err
+// }
+// err = stub.PutState(keyString, rowBytes)
+// if err != nil {
+// return false, fmt.Errorf("Error inserting row in table %s: %s", tableName, err)
+// }
+//
+// return true, nil
+
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/shim/Handler.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/shim/Handler.java
new file mode 100644
index 00000000000..a8c2702e4d0
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/shim/Handler.java
@@ -0,0 +1,956 @@
+/**
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+ */
+
+package org.hyperledger.java.shim;
+
+import static org.hyperledger.java.fsm.CallbackType.AFTER_EVENT;
+import static org.hyperledger.java.fsm.CallbackType.BEFORE_EVENT;
+import static org.hyperledger.java.fsm.CallbackType.ENTER_STATE;
+import static protos.Chaincode.ChaincodeMessage.Type.COMPLETED;
+import static protos.Chaincode.ChaincodeMessage.Type.DEL_STATE;
+import static protos.Chaincode.ChaincodeMessage.Type.ERROR;
+import static protos.Chaincode.ChaincodeMessage.Type.GET_STATE;
+import static protos.Chaincode.ChaincodeMessage.Type.INIT;
+import static protos.Chaincode.ChaincodeMessage.Type.INVOKE_CHAINCODE;
+import static protos.Chaincode.ChaincodeMessage.Type.INVOKE_QUERY;
+import static protos.Chaincode.ChaincodeMessage.Type.PUT_STATE;
+import static protos.Chaincode.ChaincodeMessage.Type.QUERY;
+import static protos.Chaincode.ChaincodeMessage.Type.QUERY_COMPLETED;
+import static protos.Chaincode.ChaincodeMessage.Type.QUERY_ERROR;
+import static protos.Chaincode.ChaincodeMessage.Type.RANGE_QUERY_STATE;
+import static protos.Chaincode.ChaincodeMessage.Type.READY;
+import static protos.Chaincode.ChaincodeMessage.Type.REGISTERED;
+import static protos.Chaincode.ChaincodeMessage.Type.RESPONSE;
+import static protos.Chaincode.ChaincodeMessage.Type.TRANSACTION;
+
+import java.util.Arrays;
+import java.util.HashMap;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.ProtocolStringList;
+
+import org.hyperledger.java.fsm.CBDesc;
+import org.hyperledger.java.fsm.Event;
+import org.hyperledger.java.fsm.EventDesc;
+import org.hyperledger.java.fsm.FSM;
+import org.hyperledger.java.fsm.exceptions.CancelledException;
+import org.hyperledger.java.fsm.exceptions.NoTransitionException;
+import org.hyperledger.java.helper.Channel;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import io.grpc.stub.StreamObserver;
+import protos.Chaincode.ChaincodeID;
+import protos.Chaincode.ChaincodeInput;
+import protos.Chaincode.ChaincodeMessage;
+import protos.Chaincode.ChaincodeMessage.Builder;
+import protos.Chaincode.ChaincodeSpec;
+import protos.Chaincode.PutStateInfo;
+
+public class Handler {
+
+ private static Log logger = LogFactory.getLog(Handler.class);
+
+ private StreamObserver chatStream;
+ private ChaincodeBase chaincode;
+
+ private HashMap isTransaction;
+ private HashMap> responseChannel;
+ public Channel nextState;
+
+ private FSM fsm;
+
+ public Handler(StreamObserver chatStream, ChaincodeBase chaincode) {
+ this.chatStream = chatStream;
+ this.chaincode = chaincode;
+
+ responseChannel = new HashMap>();
+ isTransaction = new HashMap();
+ nextState = new Channel();
+
+ fsm = new FSM("created");
+
+ fsm.addEvents(
+ // Event Name Destination Sources States
+ new EventDesc(REGISTERED.toString(), "established", "created"),
+ new EventDesc(INIT.toString(), "init", "established"),
+ new EventDesc(READY.toString(), "ready", "established"),
+ new EventDesc(ERROR.toString(), "established", "init"),
+ new EventDesc(RESPONSE.toString(), "init", "init"),
+ new EventDesc(COMPLETED.toString(), "ready", "init"),
+ new EventDesc(TRANSACTION.toString(), "transaction", "ready"),
+ new EventDesc(COMPLETED.toString(), "ready", "transaction"),
+ new EventDesc(ERROR.toString(), "ready", "transaction"),
+ new EventDesc(RESPONSE.toString(), "transaction", "transaction"),
+ new EventDesc(QUERY.toString(), "transaction", "transaction"),
+ new EventDesc(QUERY.toString(), "ready", "ready"),
+ new EventDesc(RESPONSE.toString(), "ready", "ready")
+ );
+
+ fsm.addCallbacks(
+ // Type Trigger Callback
+ new CBDesc(BEFORE_EVENT, REGISTERED.toString(), (event) -> beforeRegistered(event)),
+ new CBDesc(AFTER_EVENT, RESPONSE.toString(), (event) -> afterResponse(event)),
+ new CBDesc(AFTER_EVENT, ERROR.toString(), (event) -> afterError(event)),
+ new CBDesc(ENTER_STATE, "init", (event) -> enterInitState(event)),
+ new CBDesc(ENTER_STATE, "transaction", (event) -> enterTransactionState(event)),
+ new CBDesc(BEFORE_EVENT, QUERY.toString(), (event) -> beforeQuery(event))
+ );
+ }
+
+ public static String shortUUID(String uuid) {
+ if (uuid.length() < 8) {
+ return uuid;
+ } else {
+ return uuid.substring(0, 8);
+ }
+ }
+
+ public void triggerNextState(ChaincodeMessage message, boolean send) {
+ if(logger.isTraceEnabled())logger.trace("triggerNextState for message "+message);
+ nextState.add(new NextStateInfo(message, send));
+ }
+
+ public synchronized void serialSend(ChaincodeMessage message) {
+ try {
+ chatStream.onNext(message);
+ } catch (Exception e) {
+ logger.error(String.format("[%s]Error sending %s: %s",
+ shortUUID(message), message.getType(), e));
+ throw new RuntimeException(String.format("Error sending %s: %s", message.getType(), e));
+ }
+ if(logger.isTraceEnabled())logger.trace("serialSend complete for message "+message);
+ }
+
+ public synchronized Channel createChannel(String uuid) {
+ if (responseChannel.containsKey(uuid)) {
+ throw new IllegalStateException("[" + shortUUID(uuid) + "] Channel exists");
+ }
+
+ Channel channel = new Channel();
+ responseChannel.put(uuid, channel);
+ if(logger.isTraceEnabled())logger.trace("channel created with uuid "+uuid);
+
+ return channel;
+ }
+
+ public synchronized void sendChannel(ChaincodeMessage message) {
+ if (!responseChannel.containsKey(message.getUuid())) {
+ throw new IllegalStateException("[" + shortUUID(message) + "]sendChannel does not exist");
+ }
+
+ logger.debug(String.format("[%s]Before send", shortUUID(message)));
+ responseChannel.get(message.getUuid()).add(message);
+ logger.debug(String.format("[%s]After send", shortUUID(message)));
+ }
+
+ public ChaincodeMessage receiveChannel(Channel channel) {
+ try {
+ return channel.take();
+ } catch (InterruptedException e) {
+ logger.debug("channel.take() failed with InterruptedException");
+
+ //Channel has been closed?
+ //TODO
+ return null;
+ }
+ }
+
+ public synchronized void deleteChannel(String uuid) {
+ Channel channel = responseChannel.remove(uuid);
+ if (channel != null) {
+ channel.close();
+ }
+
+ if(logger.isTraceEnabled())logger.trace("deleteChannel done with uuid "+uuid);
+ }
+
+ /**
+ * Marks a UUID as either a transaction or a query
+ * @param uuid ID to be marked
+ * @param isTransaction true for transaction, false for query
+ * @return whether or not the UUID was successfully marked
+ */
+ public synchronized boolean markIsTransaction(String uuid, boolean isTransaction) {
+ if (this.isTransaction == null) {
+ return false;
+ }
+
+ this.isTransaction.put(uuid, isTransaction);
+ return true;
+ }
+
+ public synchronized void deleteIsTransaction(String uuid) {
+ isTransaction.remove(uuid);
+ }
+
+ public void beforeRegistered(Event event) {
+ messageHelper(event);
+ logger.debug(String.format("Received %s, ready for invocations", REGISTERED));
+ }
+
+ /**
+ * Handles requests to initialize chaincode
+ * @param message chaincode to be initialized
+ */
+ public void handleInit(ChaincodeMessage message) {
+ Runnable task = () -> {
+ ChaincodeMessage nextStatemessage = null;
+ boolean send = true;
+ try {
+ // Get the function and args from Payload
+ ChaincodeInput input;
+ try {
+ input = ChaincodeInput.parseFrom(message.getPayload());
+ } catch (Exception e) {
+ // payload = []byte(unmarshalErr.Error())
+ // // Send ERROR message to chaincode support and change state
+ // logger.debug(String.format("[%s]Incorrect payload format. Sending %s", shortUUID(message), ERROR)
+ // nextStatemessage = ChaincodeMessage.newBuilder(){Type: ERROR, Payload: payload, Uuid: message.getUuid()}
+ return;
+ }
+
+ // // Mark as a transaction (allow put/del state)
+ markIsTransaction(message.getUuid(), true);
+
+ // Create the ChaincodeStub which the chaincode can use to callback
+ ChaincodeStub stub = new ChaincodeStub(message.getUuid(), this);
+
+ // Call chaincode's Run
+ ByteString result;
+ try {
+ result = chaincode.runHelper(stub, input.getFunction(), arrayHelper(input.getArgsList()));
+ } catch (Exception e) {
+ // Send ERROR message to chaincode support and change state
+ logger.debug(String.format("[%s]Init failed. Sending %s", shortUUID(message), ERROR));
+ nextStatemessage = ChaincodeMessage.newBuilder()
+ .setType(ERROR)
+ .setPayload(ByteString.copyFromUtf8(e.getMessage()))
+ .setUuid(message.getUuid())
+ .build();
+ return;
+ } finally {
+ // delete isTransaction entry
+ deleteIsTransaction(message.getUuid());
+ }
+
+ // Send COMPLETED message to chaincode support and change state
+ nextStatemessage = ChaincodeMessage.newBuilder()
+ .setType(COMPLETED)
+ .setPayload(result)
+ .setUuid(message.getUuid())
+ .build();
+
+ logger.debug(String.format(String.format("[%s]Init succeeded. Sending %s",
+ shortUUID(message), COMPLETED)));
+
+ //TODO put in all exception states
+ } catch (Exception e) {
+ throw e;
+ } finally {
+ triggerNextState(nextStatemessage, send);
+ }
+ };
+
+ //Run above task
+ new Thread(task).start();
+ }
+
+
+ private String[] arrayHelper(ProtocolStringList argsList) {
+ String[] array = new String[argsList.size()];
+ argsList.toArray(array);
+ return array;
+ }
+
+ // enterInitState will initialize the chaincode if entering init from established.
+ public void enterInitState(Event event) {
+ logger.debug(String.format("Entered state %s", fsm.current()));
+ ChaincodeMessage message = messageHelper(event);
+ logger.debug(String.format("[%s]Received %s, initializing chaincode",
+ shortUUID(message), message.getType().toString()));
+ if (message.getType() == INIT) {
+ // Call the chaincode's Run function to initialize
+ handleInit(message);
+ }
+ }
+
+ //
+ // handleTransaction Handles request to execute a transaction.
+ public void handleTransaction(ChaincodeMessage message) {
+ // The defer followed by triggering a go routine dance is needed to ensure that the previous state transition
+ // is completed before the next one is triggered. The previous state transition is deemed complete only when
+ // the beforeInit function is exited. Interesting bug fix!!
+ Runnable task = () -> {
+ //better not be nil
+ ChaincodeMessage nextStatemessage = null;
+ boolean send = true;
+
+ //Defer
+ try {
+ // Get the function and args from Payload
+ ChaincodeInput input;
+ try {
+ input = ChaincodeInput.parseFrom(message.getPayload());
+ } catch (Exception e) {
+ logger.debug(String.format("[%s]Incorrect payload format. Sending %s", shortUUID(message), ERROR));
+ // Send ERROR message to chaincode support and change state
+ nextStatemessage = ChaincodeMessage.newBuilder()
+ .setType(ERROR)
+ .setPayload(message.getPayload())
+ .setUuid(message.getUuid())
+ .build();
+ return;
+ }
+
+ // Mark as a transaction (allow put/del state)
+ markIsTransaction(message.getUuid(), true);
+
+ // Create the ChaincodeStub which the chaincode can use to callback
+ ChaincodeStub stub = new ChaincodeStub(message.getUuid(), this);
+
+ // Call chaincode's Run
+ ByteString response;
+ try {
+ response = chaincode.runHelper(stub, input.getFunction(), arrayHelper(input.getArgsList()));
+ } catch (Exception e) {
+ e.printStackTrace();
+ System.err.flush();
+ // Send ERROR message to chaincode support and change state
+ logger.error(String.format("[%s]Error running chaincode. Transaction execution failed. Sending %s",
+ shortUUID(message), ERROR));
+ nextStatemessage = ChaincodeMessage.newBuilder()
+ .setType(ERROR)
+ .setPayload(message.getPayload())
+ .setUuid(message.getUuid())
+ .build();
+ return;
+ } finally {
+ deleteIsTransaction(message.getUuid());
+ }
+
+ logger.debug(String.format("[%s]Transaction completed. Sending %s",
+ shortUUID(message), COMPLETED));
+
+ // Send COMPLETED message to chaincode support and change state
+ Builder builder = ChaincodeMessage.newBuilder()
+ .setType(COMPLETED)
+ .setUuid(message.getUuid());
+ if (response != null) builder.setPayload(response);
+ nextStatemessage = builder.build();
+ } finally {
+ triggerNextState(nextStatemessage, send);
+ }
+ };
+
+ new Thread(task).start();
+ }
+
+ // handleQuery handles request to execute a query.
+ public void handleQuery(ChaincodeMessage message) {
+ // Query does not transition state. It can happen anytime after Ready
+ Runnable task = () -> {
+ ChaincodeMessage serialSendMessage = null;
+ try {
+ // Get the function and args from Payload
+ ChaincodeInput input;
+ try {
+ input = ChaincodeInput.parseFrom(message.getPayload());
+ } catch (Exception e) {
+ // Send ERROR message to chaincode support and change state
+ logger.debug(String.format("[%s]Incorrect payload format. Sending %s",
+ shortUUID(message), QUERY_ERROR));
+ serialSendMessage = ChaincodeMessage.newBuilder()
+ .setType(QUERY_ERROR)
+ .setPayload(ByteString.copyFromUtf8(e.getMessage()))
+ .setUuid(message.getUuid())
+ .build();
+ return;
+ }
+
+ // Mark as a query (do not allow put/del state)
+ markIsTransaction(message.getUuid(), false);
+
+ // Call chaincode's Query
+ // Create the ChaincodeStub which the chaincode can use to callback
+ ChaincodeStub stub = new ChaincodeStub(message.getUuid(), this);
+
+
+ ByteString response;
+ try {
+ response = chaincode.queryHelper(stub, input.getFunction(), arrayHelper(input.getArgsList()));
+ } catch (Exception e) {
+ // Send ERROR message to chaincode support and change state
+ logger.debug(String.format("[%s]Query execution failed. Sending %s",
+ shortUUID(message), QUERY_ERROR));
+ serialSendMessage = ChaincodeMessage.newBuilder()
+ .setType(QUERY_ERROR)
+ .setPayload(ByteString.copyFromUtf8(e.getMessage()))
+ .setUuid(message.getUuid())
+ .build();
+ return;
+ } finally {
+ deleteIsTransaction(message.getUuid());
+ }
+
+ // Send COMPLETED message to chaincode support
+ logger.debug("["+shortUUID(message)+"]Query completed. Sending "+ QUERY_COMPLETED);
+ serialSendMessage = ChaincodeMessage.newBuilder()
+ .setType(QUERY_COMPLETED)
+ .setPayload(response)
+ .setUuid(message.getUuid())
+ .build();
+ } finally {
+ serialSend(serialSendMessage);
+ }
+ };
+
+ new Thread(task).start();
+ }
+
+    // enterTransactionState fires the chaincode's transaction handler when the FSM
+    // enters the transaction state off a TRANSACTION event; other event types are ignored.
+    public void enterTransactionState(Event event) {
+        final ChaincodeMessage msg = messageHelper(event);
+        logger.debug(String.format("[%s]Received %s, invoking transaction on chaincode(src:%s, dst:%s)",
+                shortUUID(msg), msg.getType().toString(), event.src, event.dst));
+        // Only a TRANSACTION message kicks off the chaincode's Run.
+        if (msg.getType() != TRANSACTION) {
+            return;
+        }
+        handleTransaction(msg);
+    }
+
+    // afterCompleted forwards the COMPLETED message to the validating peer; on a send
+    // failure the FSM event is cancelled with the underlying cause attached.
+    public void afterCompleted(Event event) {
+        ChaincodeMessage message = messageHelper(event);
+        logger.debug(String.format("[%s]sending COMPLETED to validator for tid", shortUUID(message)));
+        try {
+            serialSend(message);
+        } catch (Exception e) {
+            // Exception(String, Throwable) does not format its message, so the old
+            // "%s" placeholder was dead text; the cause is already carried as the 2nd arg.
+            event.cancel(new Exception("send COMPLETED failed", e));
+        }
+    }
+
+    // beforeQuery dispatches an incoming QUERY message from the validator to handleQuery.
+    public void beforeQuery(Event event) {
+        handleQuery(messageHelper(event));
+    }
+
+ // afterResponse is called to deliver a response or error to the chaincode stub.
+ public void afterResponse(Event event) {
+ ChaincodeMessage message = messageHelper(event);
+ try {
+ sendChannel(message);
+ logger.debug(String.format("[%s]Received %s, communicated (state:%s)",
+ shortUUID(message), message.getType(), fsm.current()));
+ } catch (Exception e) {
+ logger.error(String.format("[%s]error sending %s (state:%s): %s", shortUUID(message),
+ message.getType(), fsm.current(), e));
+ }
+ }
+
+ private ChaincodeMessage messageHelper(Event event) {
+ try {
+ return (ChaincodeMessage) event.args[0];
+ } catch (Exception e) {
+ RuntimeException error = new RuntimeException("Received unexpected message type");
+ event.cancel(error);
+ throw error;
+ }
+ }
+
+    // afterError pushes an ERROR message received from the validator onto the per-UUID
+    // response channel, unblocking a stub waiting in get/put/del state (case 2 below).
+    public void afterError(Event event) {
+        ChaincodeMessage message = messageHelper(event);
+        /* TODO- revisit. This may no longer be needed with the serialized/streamlined messaging model
+         * There are two situations in which the ERROR event can be triggered:
+         * 1. When an error is encountered within handleInit or handleTransaction - some issue at the chaincode side; In this case there will be no responseChannel and the message has been sent to the validator.
+         * 2. The chaincode has initiated a request (get/put/del state) to the validator and is expecting a response on the responseChannel; If ERROR is received from validator, this needs to be notified on the responseChannel.
+         */
+        try {
+            sendChannel(message);
+        } catch (Exception e) {
+            // Case 1: no response channel exists, sendChannel throws, and we only log.
+            logger.debug(String.format("[%s]Error received from validator %s, communicated(state:%s)",
+                    shortUUID(message), message.getType(), fsm.current()));
+        }
+    }
+
+    // handleGetState fetches the ledger value for key from the validator, blocking on the
+    // per-UUID response channel until RESPONSE (returns payload) or ERROR (throws) arrives.
+    public ByteString handleGetState(String key, String uuid) {
+        try {
+            //TODO Implement method to get and put entire state map and not one key at a time?
+            // Create the channel on which to communicate the response from validating peer
+            Channel responseChannel;
+            try {
+                responseChannel = createChannel(uuid);
+            } catch (Exception e) {
+                // Only one in-flight state request per UUID is supported.
+                logger.debug("Another state request pending for this Uuid. Cannot process.");
+                throw e;
+            }
+
+            // Send GET_STATE message to validator chaincode support
+            ChaincodeMessage message = ChaincodeMessage.newBuilder()
+                    .setType(GET_STATE)
+                    .setPayload(ByteString.copyFromUtf8(key))
+                    .setUuid(uuid)
+                    .build();
+
+            logger.debug(String.format("[%s]Sending %s", shortUUID(message), GET_STATE));
+            try {
+                serialSend(message);
+            } catch (Exception e) {
+                logger.error(String.format("[%s]error sending GET_STATE %s", shortUUID(uuid), e));
+                throw new RuntimeException("could not send message");
+            }
+
+            // Wait on responseChannel for response
+            ChaincodeMessage response;
+            try {
+                response = receiveChannel(responseChannel);
+            } catch (Exception e) {
+                logger.error(String.format("[%s]Received unexpected message type", shortUUID(uuid)));
+                throw new RuntimeException("Received unexpected message type");
+            }
+
+            // Success response
+            if (response.getType() == RESPONSE) {
+                logger.debug(String.format("[%s]GetState received payload %s", shortUUID(response.getUuid()), RESPONSE));
+                return response.getPayload();
+            }
+
+            // Error response: surface the validator-supplied error text. Was toString(),
+            // which yields ByteString's debug representation, not the payload text;
+            // toStringUtf8() matches every other handler in this class.
+            if (response.getType() == ERROR) {
+                logger.error(String.format("[%s]GetState received error %s", shortUUID(response.getUuid()), ERROR));
+                throw new RuntimeException(response.getPayload().toStringUtf8());
+            }
+
+            // Incorrect chaincode message received
+            logger.error(String.format("[%s]Incorrect chaincode message %s received. Expecting %s or %s",
+                    shortUUID(response.getUuid()), response.getType(), RESPONSE, ERROR));
+            throw new RuntimeException("Incorrect chaincode message received");
+        } finally {
+            // Always release the per-UUID channel so later requests can proceed.
+            deleteChannel(uuid);
+        }
+    }
+
+ private boolean isTransaction(String uuid) {
+ return isTransaction.containsKey(uuid) && isTransaction.get(uuid);
+ }
+
+ public void handlePutState(String key, ByteString value, String uuid) {
+ // Check if this is a transaction
+ logger.debug("["+shortUUID(uuid)+"]Inside putstate (\""+key+"\":\""+value+"\"), isTransaction = "+isTransaction(uuid));
+
+ if (!isTransaction(uuid)) {
+ throw new IllegalStateException("Cannot put state in query context");
+ }
+
+ PutStateInfo payload = PutStateInfo.newBuilder()
+ .setKey(key)
+ .setValue(value)
+ .build();
+
+ // Create the channel on which to communicate the response from validating peer
+ Channel responseChannel;
+ try {
+ responseChannel = createChannel(uuid);
+ } catch (Exception e) {
+ logger.error(String.format("[%s]Another state request pending for this Uuid. Cannot process.", shortUUID(uuid)));
+ throw e;
+ }
+
+ //Defer
+ try {
+ // Send PUT_STATE message to validator chaincode support
+ ChaincodeMessage message = ChaincodeMessage.newBuilder()
+ .setType(PUT_STATE)
+ .setPayload(payload.toByteString())
+ .setUuid(uuid)
+ .build();
+
+ logger.debug(String.format("[%s]Sending %s", shortUUID(message), PUT_STATE));
+
+ try {
+ serialSend(message);
+ } catch (Exception e) {
+ logger.error(String.format("[%s]error sending PUT_STATE %s", message.getUuid(), e));
+ throw new RuntimeException("could not send message");
+ }
+
+ // Wait on responseChannel for response
+ ChaincodeMessage response;
+ try {
+ response = receiveChannel(responseChannel);
+ } catch (Exception e) {
+ //TODO figure out how to get uuid of receive channel
+ logger.error(String.format("[%s]Received unexpected message type", e));
+ throw e;
+ }
+
+ // Success response
+ if (response.getType() == RESPONSE) {
+ logger.debug(String.format("[%s]Received %s. Successfully updated state", shortUUID(response.getUuid()), RESPONSE));
+ return;
+ }
+
+ // Error response
+ if (response.getType() == ERROR) {
+ logger.error(String.format("[%s]Received %s. Payload: %s", shortUUID(response.getUuid()), ERROR, response.getPayload()));
+ throw new RuntimeException(response.getPayload().toStringUtf8());
+ }
+
+ // Incorrect chaincode message received
+ logger.error(String.format("[%s]Incorrect chaincode message %s received. Expecting %s or %s",
+ shortUUID(response.getUuid()), response.getType(), RESPONSE, ERROR));
+
+ throw new RuntimeException("Incorrect chaincode message received");
+ } catch (Exception e) {
+ throw e;
+ } finally {
+ deleteChannel(uuid);
+ }
+ }
+
+ public void handleDeleteState(String key, String uuid) {
+ // Check if this is a transaction
+ if (!isTransaction(uuid)) {
+ throw new RuntimeException("Cannot del state in query context");
+ }
+
+ // Create the channel on which to communicate the response from validating peer
+ Channel responseChannel;
+ try {
+ responseChannel = createChannel(uuid);
+ } catch (Exception e) {
+ logger.error(String.format("[%s]Another state request pending for this Uuid."
+ + " Cannot process create createChannel.",shortUUID(uuid)));
+ throw e;
+ }
+
+ //Defer
+ try {
+ // Send DEL_STATE message to validator chaincode support
+ ChaincodeMessage message = ChaincodeMessage.newBuilder()
+ .setType(DEL_STATE)
+ .setPayload(ByteString.copyFromUtf8(key))
+ .setUuid(uuid)
+ .build();
+ logger.debug(String.format("[%s]Sending %s", shortUUID(uuid), DEL_STATE));
+ try {
+ serialSend(message);
+ } catch (Exception e) {
+ logger.error(String.format("[%s]error sending DEL_STATE %s", shortUUID(message), DEL_STATE));
+ throw new RuntimeException("could not send message");
+ }
+
+ // Wait on responseChannel for response
+ ChaincodeMessage response;
+ try {
+ response = receiveChannel(responseChannel);
+ } catch (Exception e) {
+ logger.error(String.format("[%s]Received unexpected message type", shortUUID(message)));
+ throw new RuntimeException("Received unexpected message type");
+ }
+
+ if (response.getType() == RESPONSE) {
+ // Success response
+ logger.debug(String.format("[%s]Received %s. Successfully deleted state", message.getUuid(), RESPONSE));
+ return;
+ }
+
+ if (response.getType() == ERROR) {
+ // Error response
+ logger.error(String.format("[%s]Received %s. Payload: %s", message.getUuid(), ERROR, response.getPayload()));
+ throw new RuntimeException(response.getPayload().toStringUtf8());
+ }
+
+ // Incorrect chaincode message received
+ logger.error(String.format("[%s]Incorrect chaincode message %s received. Expecting %s or %s",
+ shortUUID(response.getUuid()), response.getType(), RESPONSE, ERROR));
+ throw new RuntimeException("Incorrect chaincode message received");
+ } finally {
+ deleteChannel(uuid);
+ }
+ }
+
+// public RangeQueryStateResponse handleRangeQueryState(String startKey, String endKey, int limit, String uuid) {
+// // Create the channel on which to communicate the response from validating peer
+// Channel responseChannel;
+// try {
+// responseChannel = createChannel(uuid);
+// } catch (Exception e) {
+// logger.debug(String.format("[%s]Another state request pending for this Uuid."
+// + " Cannot process.", shortUUID(uuid)));
+// throw e;
+// }
+//
+// //Defer
+// try {
+// // Send RANGE_QUERY_STATE message to validator chaincode support
+// RangeQueryStateInfo payload = RangeQueryStateInfo.newBuilder()
+// .setStartKey(startKey)
+// .setEndKey(endKey)
+// .setLimit(limit)
+// .build();
+//
+// ChaincodeMessage message = ChaincodeMessage.newBuilder()
+// .setType(RANGE_QUERY_STATE)
+// .setPayload(payload.toByteString())
+// .setUuid(uuid)
+// .build();
+//
+// logger.debug(String.format("[%s]Sending %s", shortUUID(message), RANGE_QUERY_STATE));
+// try {
+// serialSend(message);
+// } catch (Exception e){
+// logger.error(String.format("[%s]error sending %s", shortUUID(message), RANGE_QUERY_STATE));
+// throw new RuntimeException("could not send message");
+// }
+//
+// // Wait on responseChannel for response
+// ChaincodeMessage response;
+// try {
+// response = receiveChannel(responseChannel);
+// } catch (Exception e) {
+// logger.error(String.format("[%s]Received unexpected message type", uuid));
+// throw new RuntimeException("Received unexpected message type");
+// }
+//
+// if (response.getType() == RESPONSE) {
+// // Success response
+// logger.debug(String.format("[%s]Received %s. Successfully got range",
+// shortUUID(response.getUuid()), RESPONSE));
+//
+// RangeQueryStateResponse rangeQueryResponse;
+// try {
+// rangeQueryResponse = RangeQueryStateResponse.parseFrom(response.getPayload());
+// } catch (Exception e) {
+// logger.error(String.format("[%s]unmarshall error", shortUUID(response.getUuid())));
+// throw new RuntimeException("Error unmarshalling RangeQueryStateResponse.");
+// }
+//
+// return rangeQueryResponse;
+// }
+//
+// if (response.getType() == ERROR) {
+// // Error response
+// logger.error(String.format("[%s]Received %s",
+// shortUUID(response.getUuid()), ERROR));
+// throw new RuntimeException(response.getPayload().toStringUtf8());
+// }
+//
+// // Incorrect chaincode message received
+// logger.error(String.format("Incorrect chaincode message %s recieved. Expecting %s or %s",
+// response.getType(), RESPONSE, ERROR));
+// throw new RuntimeException("Incorrect chaincode message received");
+// } finally {
+// deleteChannel(uuid);
+// }
+// }
+
+    // handleInvokeChaincode invokes another chaincode (chaincodeName) with the given
+    // function and args on behalf of transaction uuid, returning the callee's payload.
+    // Only legal in a transaction context, not a query.
+    public ByteString handleInvokeChaincode(String chaincodeName, String function, String[] args, String uuid) {
+        // Check if this is a transaction. The old check used isTransaction.containsKey(uuid),
+        // which is true even in a query context because queries are marked with
+        // markIsTransaction(uuid, false) — so invokes were never rejected in queries.
+        // The isTransaction(uuid) helper checks the stored value, not mere presence.
+        if (!isTransaction(uuid)) {
+            throw new RuntimeException("Cannot invoke chaincode in query context");
+        }
+
+        ChaincodeID id = ChaincodeID.newBuilder()
+                .setName(chaincodeName).build();
+        ChaincodeInput input = ChaincodeInput.newBuilder()
+                .setFunction(function)
+                .addAllArgs(Arrays.asList(args))
+                .build();
+        ChaincodeSpec payload = ChaincodeSpec.newBuilder()
+                .setChaincodeID(id)
+                .setCtorMsg(input)
+                .build();
+
+        // Create the channel on which to communicate the response from validating peer
+        Channel responseChannel;
+        try {
+            responseChannel = createChannel(uuid);
+        } catch (Exception e) {
+            logger.error(String.format("[%s]Another state request pending for this Uuid. Cannot process.", shortUUID(uuid)));
+            throw e;
+        }
+
+        try {
+            // Send INVOKE_CHAINCODE message to validator chaincode support
+            ChaincodeMessage message = ChaincodeMessage.newBuilder()
+                    .setType(INVOKE_CHAINCODE)
+                    .setPayload(payload.toByteString())
+                    .setUuid(uuid)
+                    .build();
+
+            logger.debug(String.format("[%s]Sending %s",
+                    shortUUID(message), INVOKE_CHAINCODE));
+
+            try {
+                serialSend(message);
+            } catch (Exception e) {
+                logger.error("["+shortUUID(message)+"]Error sending "+INVOKE_CHAINCODE+": "+e.getMessage());
+                throw e;
+            }
+
+            // Wait on responseChannel for response
+            ChaincodeMessage response;
+            try {
+                response = receiveChannel(responseChannel);
+            } catch (Exception e) {
+                logger.error(String.format("[%s]Received unexpected message type", shortUUID(message)));
+                throw new RuntimeException("Received unexpected message type");
+            }
+
+            if (response.getType() == RESPONSE) {
+                // Success response
+                logger.debug(String.format("[%s]Received %s. Successfully invoked chaincode", shortUUID(response.getUuid()), RESPONSE));
+                return response.getPayload();
+            }
+
+            if (response.getType() == ERROR) {
+                // Error response
+                logger.error(String.format("[%s]Received %s.", shortUUID(response.getUuid()), ERROR));
+                throw new RuntimeException(response.getPayload().toStringUtf8());
+            }
+
+            // Incorrect chaincode message received
+            logger.debug(String.format("[%s]Incorrect chaincode message %s received. Expecting %s or %s",
+                    shortUUID(response.getUuid()), response.getType(), RESPONSE, ERROR));
+            throw new RuntimeException("Incorrect chaincode message received");
+        } finally {
+            // Always release the per-UUID channel so later requests can proceed.
+            deleteChannel(uuid);
+        }
+    }
+
+ public ByteString handleQueryChaincode(String chaincodeName, String function, String[] args, String uuid) {
+ ChaincodeID id = ChaincodeID.newBuilder().setName(chaincodeName).build();
+ ChaincodeInput input = ChaincodeInput.newBuilder()
+ .setFunction(function)
+ .addAllArgs(Arrays.asList(args))
+ .build();
+ ChaincodeSpec payload = ChaincodeSpec.newBuilder()
+ .setChaincodeID(id)
+ .setCtorMsg(input)
+ .build();
+
+ // Create the channel on which to communicate the response from validating peer
+ Channel responseChannel;
+ try {
+ responseChannel = createChannel(uuid);
+ } catch (Exception e) {
+ logger.debug(String.format("Another request pending for this Uuid. Cannot process."));
+ throw e;
+ }
+
+ //Defer
+ try {
+
+ // Send INVOKE_QUERY message to validator chaincode support
+ ChaincodeMessage message = ChaincodeMessage.newBuilder()
+ .setType(INVOKE_QUERY)
+ .setPayload(payload.toByteString())
+ .setUuid(uuid)
+ .build();
+
+ logger.debug(String.format("[%s]Sending %s", shortUUID(message), INVOKE_QUERY));
+
+ try {
+ serialSend(message);
+ } catch (Exception e) {
+ logger.error(String.format("[%s]error sending %s", shortUUID(message), INVOKE_QUERY));
+ throw new RuntimeException("could not send message");
+ }
+
+ // Wait on responseChannel for response
+ ChaincodeMessage response;
+ try {
+ response = receiveChannel(responseChannel);
+ } catch (Exception e) {
+ logger.error(String.format("[%s]Received unexpected message type", shortUUID(message)));
+ throw new RuntimeException("Received unexpected message type");
+ }
+
+ if (response.getType() == RESPONSE) {
+ // Success response
+ logger.debug(String.format("[%s]Received %s. Successfully queried chaincode",
+ shortUUID(response.getUuid()), RESPONSE));
+ return response.getPayload();
+ }
+
+ if (response.getType() == ERROR) {
+ // Error response
+ logger.error(String.format("[%s]Received %s.",
+ shortUUID(response.getUuid()), ERROR));
+ throw new RuntimeException(response.getPayload().toStringUtf8());
+ }
+
+ // Incorrect chaincode message received
+ logger.error(String.format("[%s]Incorrect chaincode message %s recieved. Expecting %s or %s",
+ shortUUID(response.getUuid()), response.getType(), RESPONSE, ERROR));
+ throw new RuntimeException("Incorrect chaincode message received");
+ } finally {
+ deleteChannel(uuid);
+ }
+ }
+
+    // handleMessage is the shim-side entry point for messages arriving on the
+    // chaincode/validator stream: KEEPALIVEs are dropped, events the FSM cannot take
+    // are rejected with an ERROR back to the validator, everything else drives the FSM.
+    public synchronized void handleMessage(ChaincodeMessage message) throws Exception {
+        if (message.getType() == ChaincodeMessage.Type.KEEPALIVE) {
+            logger.debug(String.format("[%s] Recieved KEEPALIVE message, do nothing",
+                    shortUUID(message)));
+            // Received a keep alive message, we don't do anything with it for now
+            // and it does not touch the state machine
+            return;
+        }
+
+        logger.debug(String.format("[%s]Handling ChaincodeMessage of type: %s(state:%s)",
+                shortUUID(message), message.getType(), fsm.current()));
+
+        // Reject events that are illegal in the current FSM state, telling the validator why.
+        if (fsm.eventCannotOccur(message.getType().toString())) {
+            String errStr = String.format("[%s]Chaincode handler org.hyperledger.java.fsm cannot handle message (%s) with payload size (%d) while in state: %s",
+                    message.getUuid(), message.getType(), message.getPayload().size(), fsm.current());
+            serialSend(ChaincodeMessage.newBuilder()
+                    .setType(ERROR)
+                    .setPayload(ByteString.copyFromUtf8(errStr))
+                    .setUuid(message.getUuid())
+                    .build());
+            throw new RuntimeException(errStr);
+        }
+
+        // Filter errors to allow NoTransitionError and CanceledError
+        // to not propagate for cases where embedded Err == nil.
+        try {
+            fsm.raiseEvent(message.getType().toString(), message);
+        } catch (NoTransitionException e) {
+            if (e.error != null) throw e;
+            logger.debug("[" + shortUUID(message) + "]Ignoring NoTransitionError");
+        } catch (CancelledException e) {
+            if (e.error != null) throw e;
+            logger.debug("[" + shortUUID(message) + "]Ignoring CanceledError");
+        }
+    }
+
+    // Convenience overload: shortened form of the message's uuid field for log prefixes.
+    private String shortUUID(ChaincodeMessage message) {
+        return shortUUID(message.getUuid());
+    }
+
+}
diff --git a/core/chaincode/shim/java/src/main/java/org/hyperledger/java/shim/NextStateInfo.java b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/shim/NextStateInfo.java
new file mode 100644
index 00000000000..0c6cbd844dc
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/java/org/hyperledger/java/shim/NextStateInfo.java
@@ -0,0 +1,31 @@
+/*
+Copyright DTCC 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package org.hyperledger.java.shim;
+
+import protos.Chaincode.ChaincodeMessage;
+
+// NextStateInfo is a simple value holder pairing a ChaincodeMessage with a flag
+// that controls whether it should also be sent to the chaincode side.
+public class NextStateInfo {
+
+    // The message to process for the next state transition.
+    public ChaincodeMessage message;
+    // NOTE(review): presumably "send to chaincode" — confirm against the handler that consumes this.
+    public boolean sendToCC;
+
+    public NextStateInfo(ChaincodeMessage message, boolean sendToCC) {
+        this.message = message;
+        this.sendToCC = sendToCC;
+    }
+
+}
diff --git a/core/chaincode/shim/java/src/main/proto/chaincode.proto b/core/chaincode/shim/java/src/main/proto/chaincode.proto
new file mode 100644
index 00000000000..94f034759c0
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/proto/chaincode.proto
@@ -0,0 +1,197 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+syntax = "proto3";
+
+package protos;
+
+import "chaincodeevent.proto";
+import "google/protobuf/timestamp.proto";
+
+
+// Confidentiality Levels
+enum ConfidentialityLevel {
+ PUBLIC = 0;
+ CONFIDENTIAL = 1;
+}
+
+
+//ChaincodeID contains the path as specified by the deploy transaction
+//that created it as well as the hashCode that is generated by the
+//system for the path. From the user level (ie, CLI, REST API and so on)
+//deploy transaction is expected to provide the path and other requests
+//are expected to provide the hashCode. The other value will be ignored.
+//Internally, the structure could contain both values. For instance, the
+//hashCode will be set when first generated using the path
+message ChaincodeID {
+ //deploy transaction will use the path
+ string path = 1;
+
+ //all other requests will use the name (really a hashcode) generated by
+ //the deploy transaction
+ string name = 2;
+}
+
+// Carries the chaincode function and its arguments.
+message ChaincodeInput {
+
+ string function = 1;
+ repeated string args = 2;
+
+}
+
+// Carries the chaincode specification. This is the actual metadata required for
+// defining a chaincode.
+message ChaincodeSpec {
+
+ enum Type {
+ UNDEFINED = 0;
+ GOLANG = 1;
+ NODE = 2;
+ CAR = 3;
+ JAVA = 4;
+ }
+
+ Type type = 1;
+ ChaincodeID chaincodeID = 2;
+ ChaincodeInput ctorMsg = 3;
+ int32 timeout = 4;
+ string secureContext = 5;
+ ConfidentialityLevel confidentialityLevel = 6;
+ bytes metadata = 7;
+ repeated string attributes = 8;
+}
+
+// Specify the deployment of a chaincode.
+// TODO: Define `codePackage`.
+message ChaincodeDeploymentSpec {
+
+ enum ExecutionEnvironment {
+ DOCKER = 0;
+ SYSTEM = 1;
+ }
+
+ ChaincodeSpec chaincodeSpec = 1;
+ // Controls when the chaincode becomes executable.
+ google.protobuf.Timestamp effectiveDate = 2;
+ bytes codePackage = 3;
+ ExecutionEnvironment execEnv= 4;
+
+}
+
+// Carries the chaincode function and its arguments.
+message ChaincodeInvocationSpec {
+
+ ChaincodeSpec chaincodeSpec = 1;
+ // This field can contain a user-specified ID generation algorithm
+ // If supplied, this will be used to generate a ID
+ // If not supplied (left empty), a random UUID will be generated
+ // The algorithm consists of two parts:
+ // 1, a hash function
+ // 2, a decoding used to decode user (string) input to bytes
+ // Currently, SHA256 with BASE64 is supported (e.g. idGenerationAlg='sha256base64')
+ string idGenerationAlg = 2;
+}
+
+// This structure contain transaction data that we send to the chaincode
+// container shim and allow the chaincode to access through the shim interface.
+// TODO: Consider remove this message and just pass the transaction object
+// to the shim and/or allow the chaincode to query transactions.
+message ChaincodeSecurityContext {
+ bytes callerCert = 1;
+ bytes callerSign = 2;
+ bytes payload = 3;
+ bytes binding = 4;
+ bytes metadata = 5;
+ bytes parentMetadata = 6;
+ google.protobuf.Timestamp txTimestamp = 7; // transaction timestamp
+}
+
+message ChaincodeMessage {
+
+ enum Type {
+ UNDEFINED = 0;
+ REGISTER = 1;
+ REGISTERED = 2;
+ INIT = 3;
+ READY = 4;
+ TRANSACTION = 5;
+ COMPLETED = 6;
+ ERROR = 7;
+ GET_STATE = 8;
+ PUT_STATE = 9;
+ DEL_STATE = 10;
+ INVOKE_CHAINCODE = 11;
+ INVOKE_QUERY = 12;
+ RESPONSE = 13;
+ QUERY = 14;
+ QUERY_COMPLETED = 15;
+ QUERY_ERROR = 16;
+ RANGE_QUERY_STATE = 17;
+ RANGE_QUERY_STATE_NEXT = 18;
+ RANGE_QUERY_STATE_CLOSE = 19;
+ KEEPALIVE = 20;
+ }
+
+ Type type = 1;
+ google.protobuf.Timestamp timestamp = 2;
+ bytes payload = 3;
+ string uuid = 4;
+ ChaincodeSecurityContext securityContext = 5;
+
+ //event emmited by chaincode. Used only with Init or Invoke.
+ // This event is then stored (currently)
+ //with Block.NonHashData.TransactionResult
+ ChaincodeEvent chaincodeEvent = 6;
+}
+
+// PutStateInfo carries the (key, value) pair of a PUT_STATE request payload.
+message PutStateInfo {
+    string key = 1;
+    bytes value = 2;
+}
+
+// RangeQueryState requests the keys in [startKey, endKey] from the ledger.
+message RangeQueryState {
+    string startKey = 1;
+    string endKey = 2;
+}
+
+// RangeQueryStateNext requests the next page of an open range query, identified by ID.
+message RangeQueryStateNext {
+    string ID = 1;
+}
+
+// RangeQueryStateClose closes an open range query, identified by ID.
+message RangeQueryStateClose {
+    string ID = 1;
+}
+
+// RangeQueryStateKeyValue is a single (key, value) result entry of a range query.
+message RangeQueryStateKeyValue {
+    string key = 1;
+    bytes value = 2;
+}
+
+// RangeQueryStateResponse is one page of range-query results; hasMore signals that
+// further pages can be fetched with RangeQueryStateNext using the same ID.
+message RangeQueryStateResponse {
+    repeated RangeQueryStateKeyValue keysAndValues = 1;
+    bool hasMore = 2;
+    string ID = 3;
+}
+
+// Interface that provides support to chaincode execution. ChaincodeContext
+// provides the context necessary for the server to respond appropriately.
+service ChaincodeSupport {
+
+ rpc Register(stream ChaincodeMessage) returns (stream ChaincodeMessage) {}
+
+
+}
diff --git a/core/chaincode/shim/java/src/main/proto/chaincodeevent.proto b/core/chaincode/shim/java/src/main/proto/chaincodeevent.proto
new file mode 100755
index 00000000000..0c2b3bfce1d
--- /dev/null
+++ b/core/chaincode/shim/java/src/main/proto/chaincodeevent.proto
@@ -0,0 +1,26 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+syntax = "proto3";
+package protos;
+
+//Chaincode is used for events and registrations that are specific to chaincode
+//string type - "chaincode"
+message ChaincodeEvent {
+    string chaincodeID = 1; // identifier of the emitting chaincode
+    string txID = 2;        // transaction in which the event was emitted
+    string eventName = 3;   // application-defined event name
+    bytes payload = 4;      // opaque application-defined payload
+}
diff --git a/core/chaincode/shim/shim_test.go b/core/chaincode/shim/shim_test.go
new file mode 100644
index 00000000000..59f549a0278
--- /dev/null
+++ b/core/chaincode/shim/shim_test.go
@@ -0,0 +1,136 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package shim
+
+import (
+ "os"
+ "testing"
+
+ "github.com/op/go-logging"
+)
+
+// Test Go shim functionality that can be tested outside of a real chaincode
+// context.
+
+// TestShimLogging simply tests that the APIs are working. These tests test
+// for correct control over the shim's logging object and the LogLevel
+// function.
+func TestShimLogging(t *testing.T) {
+	// Level filtering: at LogCritical, DEBUG must be disabled and CRITICAL enabled.
+	SetLoggingLevel(LogCritical)
+	if shimLoggingLevel != LogCritical {
+		t.Errorf("shimLoggingLevel is not LogCritical as expected")
+	}
+	if chaincodeLogger.IsEnabledFor(logging.DEBUG) {
+		t.Errorf("The chaincodeLogger should not be enabled for DEBUG")
+	}
+	if !chaincodeLogger.IsEnabledFor(logging.CRITICAL) {
+		t.Errorf("The chaincodeLogger should be enabled for CRITICAL")
+	}
+	// LogLevel parsing: names are accepted case-insensitively, each mapping to its level.
+	var level LoggingLevel
+	var err error
+	level, err = LogLevel("debug")
+	if err != nil {
+		t.Errorf("LogLevel(debug) failed")
+	}
+	if level != LogDebug {
+		t.Errorf("LogLevel(debug) did not return LogDebug")
+	}
+	level, err = LogLevel("INFO")
+	if err != nil {
+		t.Errorf("LogLevel(INFO) failed")
+	}
+	if level != LogInfo {
+		t.Errorf("LogLevel(INFO) did not return LogInfo")
+	}
+	level, err = LogLevel("Notice")
+	if err != nil {
+		t.Errorf("LogLevel(Notice) failed")
+	}
+	if level != LogNotice {
+		t.Errorf("LogLevel(Notice) did not return LogNotice")
+	}
+	level, err = LogLevel("WaRnInG")
+	if err != nil {
+		t.Errorf("LogLevel(WaRnInG) failed")
+	}
+	if level != LogWarning {
+		t.Errorf("LogLevel(WaRnInG) did not return LogWarning")
+	}
+	level, err = LogLevel("ERRor")
+	if err != nil {
+		t.Errorf("LogLevel(ERRor) failed")
+	}
+	if level != LogError {
+		t.Errorf("LogLevel(ERRor) did not return LogError")
+	}
+	level, err = LogLevel("critiCAL")
+	if err != nil {
+		t.Errorf("LogLevel(critiCAL) failed")
+	}
+	if level != LogCritical {
+		t.Errorf("LogLevel(critiCAL) did not return LogCritical")
+	}
+	// Unknown names must error and fall back to LogError.
+	level, err = LogLevel("foo")
+	if err == nil {
+		t.Errorf("LogLevel(foo) did not fail")
+	}
+	if level != LogError {
+		t.Errorf("LogLevel(foo) did not return LogError")
+	}
+}
+
+// TestChaincodeLogging tests the logging APIs for chaincodes.
+func TestChaincodeLogging(t *testing.T) {
+
+	// From start() - We can't call start() from this test
+	format := logging.MustStringFormatter("%{time:15:04:05.000} [%{module}] %{level:.4s} : %{message}")
+	backend := logging.NewLogBackend(os.Stderr, "", 0)
+	backendFormatter := logging.NewBackendFormatter(backend, format)
+	logging.SetBackend(backendFormatter).SetLevel(logging.Level(shimLoggingLevel), "shim")
+
+	foo := NewLogger("foo")
+	bar := NewLogger("bar")
+
+	// Smoke-test the formatted (…f) logging APIs at every level; these only need
+	// to not panic — output goes to stderr and is not asserted on.
+	foo.Debugf("Foo is debugging: %d", 10)
+	bar.Infof("Bar is informational? %s.", "Yes")
+	foo.Noticef("NOTE NOTE NOTE")
+	bar.Warningf("Danger, Danger %s %s", "Will", "Robinson!")
+	foo.Errorf("I'm sorry Dave, I'm afraid I can't do that.")
+	bar.Criticalf("PI is not equal to 3.14, we computed it as %.2f", 4.13)
+
+	// Smoke-test the unformatted (print-style) logging APIs the same way.
+	bar.Debug("Foo is debugging:", 10)
+	foo.Info("Bar is informational?", "Yes.")
+	bar.Notice("NOTE NOTE NOTE")
+	foo.Warning("Danger, Danger", "Will", "Robinson!")
+	bar.Error("I'm sorry Dave, I'm afraid I can't do that.")
+	foo.Critical("PI is not equal to", 3.14, ", we computed it as", 4.13)
+
+	// Per-logger level control: SetLevel must gate IsEnabledFor independently per logger.
+	foo.SetLevel(LogWarning)
+	if foo.IsEnabledFor(LogDebug) {
+		t.Errorf("'foo' should not be enabled for LogDebug")
+	}
+	if !foo.IsEnabledFor(LogCritical) {
+		t.Errorf("'foo' should be enabled for LogCritical")
+	}
+	bar.SetLevel(LogCritical)
+	if bar.IsEnabledFor(LogDebug) {
+		t.Errorf("'bar' should not be enabled for LogDebug")
+	}
+	if !bar.IsEnabledFor(LogCritical) {
+		t.Errorf("'bar' should be enabled for LogCritical")
+	}
+}
diff --git a/core/chaincode/testdata/server1.key b/core/chaincode/testdata/server1.key
new file mode 100644
index 00000000000..05225e25bdf
--- /dev/null
+++ b/core/chaincode/testdata/server1.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC9I7JdhKdQSen0
+SU10GB6mQaqU3NNhccHCYEZxW0GAy2e1i3UHRSMjIm4QkgYeFHOC3FmadfkjcUS7
+A/65O0Cva5+GlVpAodh2hWzbICEwhwnkvFvC7lpk/5CBGwrMMkWdb5YmRLBftALv
+AnxNYIZtb4yDDMfNhjJgv+a4+nHvGMFu7zobZHw049G+Pyb/QcdfV2sq33r9BWfg
+XiGNXU/IT064aL7DP8HeQ9tbm3UjQykN7N4MeRUvUCOVQDaqnuBG1ltxmPNk3daH
+Ht15ezGQj7+luCZfoKxsPbdEjSTao4QHIpiXkktjtF6nHe6HGNlcR4oODdTWwvyH
+P+cls6E5AgMBAAECggEAawBK5AUr8oweQwlFPCx5Cm2T52uWWkZU63Yz4+VQhWCN
+Z3Nh3Z8LZdEQm2dA9rtzLdEjD/M14wstau+zp28SBSvJsAXGbVIid8q77quamy2K
+N1EoSHs3wHOkeKWn0iWOpKaAN14cucfI8RZi5BN7eWXmBC2yy17kGT/3cyacVjqd
+v5MG0q7SycCMJQ9v0w5ucnxMwo0xl1P+rvgMlJmEVoY4GMRyIJFhsd/FiPtdRufo
+YgfoSCLhBcGaRDuoVMj/eY+iVFNFf0V25Z5FaBSCbdSu0gUxVeK8qio+/yHiLgSE
+rNkKfnZXozATitSDPhYqI/Z6arKbezyz3M1Mdnc4AQKBgQDoIByrspyC8zhe0Tqs
++yh5QhtjG69sybK/hUHs1BgA1fvsbhdjY/lYDcfmfgr1bq1qBldKT/TeDTGcT7jf
+O3cmqQADqaHnvEAjolYWL6coWx6DjMbmGAOHYcKH5UJNtxQyvYdBCukYNdS6qeQ4
+CSuDPhDCpuVKSCCkjfMj/shN+QKBgQDQl8I+mzrGhDxoXUOGSgrEZlm6IGiQFrnw
+SGSuYXSkCk8VvNJ7rejaM3XbVyMHhQtoZzoILXTEtB9bWETGpxMJY9f5IZrlap5f
+0n4TcfkVUTutWBRGv7pnNa9LQgKbl9g2g+0ZPpqhVzGKHiWXim3DTwMrmKwMMBDM
+S2AWOz+9QQKBgQDZxD6BicgxRdJtiaadX+bBk5Yz7WYvDi5mZLLLZtK5/gYg8ct5
+T/I9fHg8MaVASbvgyuAbdFWJ+CDBvzz2GEiY+VVtGjgtOjuF8qcGm2AbiRhgW7LB
+bGfrG3ivmxIP7KDDVXFNQNoa/216TSYwt9Gdh52br41Omcod9pnS/BSYqQKBgCV8
+wJoDx8JZjYmsCTCfKeQ2CLYckO4kR3JZcE6ukQnSGHhtoN49WPWt/eZXeKInVO+E
+bbd+g9e+D0yZRnL1H8S2PuA1G+NTD/dkmuffucYM/U6f37T/S/60dTZvPIy980uI
+1hmNWLjaNLr/w/3CEQvirU0ueO0PzlUIztHv//qBAoGBAKYxg283f/UNZhDz2QyX
+a6brNrF/EL5MF2yTSUuowuwRGOqsQeCN+cNNWOG9tB9sU3ozeG+tKpQu/SU/rZcr
+qQPl1kwA2swv48HaejZcQX6Nm/9c7IKhzPHskDjZauwa+8ag708K3Bv2HwRhRoyc
+pg3wyc8mI9fhOc4LnKsiePPS
+-----END PRIVATE KEY-----
diff --git a/core/chaincode/testdata/server1.pem b/core/chaincode/testdata/server1.pem
new file mode 100644
index 00000000000..71e7ce6761c
--- /dev/null
+++ b/core/chaincode/testdata/server1.pem
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDYzCCAkugAwIBAgIJAIbjeVJsMa4MMA0GCSqGSIb3DQEBCwUAMEgxCzAJBgNV
+BAYTAlVTMQswCQYDVQQIDAJOQzEPMA0GA1UEBwwGRHVyaGFtMQswCQYDVQQKDAJI
+TDEOMAwGA1UEAwwFZHVtbXkwHhcNMTYwNjE4MTUzMTU3WhcNMjYwNjE2MTUzMTU3
+WjBIMQswCQYDVQQGEwJVUzELMAkGA1UECAwCTkMxDzANBgNVBAcMBkR1cmhhbTEL
+MAkGA1UECgwCSEwxDjAMBgNVBAMMBWR1bW15MIIBIjANBgkqhkiG9w0BAQEFAAOC
+AQ8AMIIBCgKCAQEAvSOyXYSnUEnp9ElNdBgepkGqlNzTYXHBwmBGcVtBgMtntYt1
+B0UjIyJuEJIGHhRzgtxZmnX5I3FEuwP+uTtAr2ufhpVaQKHYdoVs2yAhMIcJ5Lxb
+wu5aZP+QgRsKzDJFnW+WJkSwX7QC7wJ8TWCGbW+MgwzHzYYyYL/muPpx7xjBbu86
+G2R8NOPRvj8m/0HHX1drKt96/QVn4F4hjV1PyE9OuGi+wz/B3kPbW5t1I0MpDeze
+DHkVL1AjlUA2qp7gRtZbcZjzZN3Whx7deXsxkI+/pbgmX6CsbD23RI0k2qOEByKY
+l5JLY7Repx3uhxjZXEeKDg3U1sL8hz/nJbOhOQIDAQABo1AwTjAdBgNVHQ4EFgQU
+O/l3pfRJKFQ4RvXPZolnUkreSngwHwYDVR0jBBgwFoAUO/l3pfRJKFQ4RvXPZoln
+UkreSngwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAaCUwh66AKgBY
+mCg/q5XxdpFKCbKqPREoG/FG0fk94683+w5cERspoLDzdQNiq4Ij/vLKVmGtRr3L
+tYBFT1ICH+kjb+tJMzerGrQxHuBBtyqQdsY1vpk7h/kj0tsAIyljm492+ETHxmlA
+mFdxqA1YbGpDMs1r//iLBLgU17+hyqfO0POcXXSJnJDhKCuBmeeHtRcydYyDbETd
+BgjFOPUQsprFBh7niHAl1vAEBqII/M6d9x23FNgm5kgmNJWoNu2DbWbnLOWQCBgv
+CabzNVL93EIAhTiNQ0e1tkdNKy7rtiguAFdtZRIKVD/qIWEK40k6NXCBsTlwquDL
+EMTljgKrSQ==
+-----END CERTIFICATE-----
diff --git a/core/comm/config.go b/core/comm/config.go
new file mode 100644
index 00000000000..ed0eaddd0ac
--- /dev/null
+++ b/core/comm/config.go
@@ -0,0 +1,53 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package comm
+
+import (
+ "github.com/spf13/viper"
+)
+
+// Is the configuration cached?
+var configurationCached = false
+
+// Cached values of commonly used configuration constants.
+var tlsEnabled bool
+
+// CacheConfiguration computes and caches commonly-used constants and
+// computed constants as package variables, avoiding repeated viper lookups.
+func CacheConfiguration() (err error) {
+
+ tlsEnabled = viper.GetBool("peer.tls.enabled")
+
+ configurationCached = true
+
+ return
+}
+
+// cacheConfiguration logs an error if error checks have failed.
+func cacheConfiguration() {
+ if err := CacheConfiguration(); err != nil {
+ commLogger.Errorf("Execution continues after CacheConfiguration() failure : %s", err)
+ }
+}
+
+// TLSEnabled returns the cached value for the "peer.tls.enabled" configuration value
+func TLSEnabled() bool {
+ if !configurationCached {
+ cacheConfiguration()
+ }
+ return tlsEnabled
+}
diff --git a/core/comm/connection.go b/core/comm/connection.go
new file mode 100644
index 00000000000..2916474f0b3
--- /dev/null
+++ b/core/comm/connection.go
@@ -0,0 +1,54 @@
+package comm
+
+import (
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+const defaultTimeout = time.Second * 3
+
+var commLogger = logging.MustGetLogger("comm")
+
+// NewClientConnectionWithAddress returns a new grpc.ClientConn to the given address.
+func NewClientConnectionWithAddress(peerAddress string, block bool, tslEnabled bool, creds credentials.TransportAuthenticator) (*grpc.ClientConn, error) {
+ var opts []grpc.DialOption
+ if tslEnabled {
+ opts = append(opts, grpc.WithTransportCredentials(creds))
+ } else {
+ opts = append(opts, grpc.WithInsecure())
+ }
+ opts = append(opts, grpc.WithTimeout(defaultTimeout))
+ if block {
+ opts = append(opts, grpc.WithBlock())
+ }
+ conn, err := grpc.Dial(peerAddress, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return conn, err
+}
+
+// InitTLSForPeer returns TLS credentials for peer
+func InitTLSForPeer() credentials.TransportAuthenticator {
+ var sn string
+ if viper.GetString("peer.tls.serverhostoverride") != "" {
+ sn = viper.GetString("peer.tls.serverhostoverride")
+ }
+ var creds credentials.TransportAuthenticator
+ if viper.GetString("peer.tls.cert.file") != "" {
+ var err error
+ creds, err = credentials.NewClientTLSFromFile(viper.GetString("peer.tls.cert.file"), sn)
+ if err != nil {
+ grpclog.Fatalf("Failed to create TLS credentials %v", err)
+ }
+ } else {
+ creds = credentials.NewClientTLSFromCert(nil, sn)
+ }
+ return creds
+}
diff --git a/core/comm/connection_test.go b/core/comm/connection_test.go
new file mode 100644
index 00000000000..bc4a2300efb
--- /dev/null
+++ b/core/comm/connection_test.go
@@ -0,0 +1,44 @@
+package comm
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/spf13/viper"
+
+ "github.com/hyperledger/fabric/core/config"
+ "google.golang.org/grpc"
+)
+
+func TestConnection_Correct(t *testing.T) {
+ config.SetupTestConfig("./../../peer")
+ viper.Set("ledger.blockchain.deploy-system-chaincode", "false")
+ var tmpConn *grpc.ClientConn
+ var err error
+ if TLSEnabled() {
+ tmpConn, err = NewClientConnectionWithAddress(viper.GetString("peer.address"), true, true, InitTLSForPeer())
+ }
+ tmpConn, err = NewClientConnectionWithAddress(viper.GetString("peer.address"), true, false, nil)
+ if err != nil {
+ t.Fatalf("error connection to server at host:port = %s\n", viper.GetString("peer.address"))
+ }
+
+ tmpConn.Close()
+}
+
+func TestConnection_WrongAddress(t *testing.T) {
+ config.SetupTestConfig("./../../peer")
+ viper.Set("ledger.blockchain.deploy-system-chaincode", "false")
+ viper.Set("peer.address", "0.0.0.0:30304")
+ var tmpConn *grpc.ClientConn
+ var err error
+ if TLSEnabled() {
+ tmpConn, err = NewClientConnectionWithAddress(viper.GetString("peer.address"), true, true, InitTLSForPeer())
+ }
+ tmpConn, err = NewClientConnectionWithAddress(viper.GetString("peer.address"), true, false, nil)
+ if err == nil {
+ fmt.Printf("error connection to server - at host:port = %s\n", viper.GetString("peer.address"))
+ t.Error("error connection to server - connection should fail")
+ tmpConn.Close()
+ }
+}
diff --git a/core/config.go b/core/config.go
new file mode 100644
index 00000000000..9bf1feb9251
--- /dev/null
+++ b/core/config.go
@@ -0,0 +1,106 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+ "flag"
+ "fmt"
+ "runtime"
+ "strings"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+// Config the config wrapper structure
+type Config struct {
+}
+
+func init() {
+
+}
+
+// SetupTestLogging setup the logging during test execution
+func SetupTestLogging() {
+ level, err := logging.LogLevel(viper.GetString("peer.logging.level"))
+ if err == nil {
+ // No error, use the setting
+ logging.SetLevel(level, "main")
+ logging.SetLevel(level, "server")
+ logging.SetLevel(level, "peer")
+ } else {
+ log.Warningf("Log level not recognized '%s', defaulting to %s: %s", viper.GetString("peer.logging.level"), logging.ERROR, err)
+ logging.SetLevel(logging.ERROR, "main")
+ logging.SetLevel(logging.ERROR, "server")
+ logging.SetLevel(logging.ERROR, "peer")
+ }
+}
+
+// SetupTestConfig setup the config during test execution
+func SetupTestConfig() {
+ flag.Parse()
+
+ // Now set the configuration file
+ viper.SetEnvPrefix("HYPERLEDGER")
+ viper.AutomaticEnv()
+ replacer := strings.NewReplacer(".", "_")
+ viper.SetEnvKeyReplacer(replacer)
+ viper.SetConfigName("core") // name of config file (without extension)
+ viper.AddConfigPath("./") // path to look for the config file in
+ viper.AddConfigPath("./../") // path to look for the config file in
+ err := viper.ReadInConfig() // Find and read the config file
+ if err != nil { // Handle errors reading the config file
+ panic(fmt.Errorf("Fatal error config file: %s \n", err))
+ }
+
+ SetupTestLogging()
+
+ // Set the number of maxprocs
+ var numProcsDesired = viper.GetInt("peer.gomaxprocs")
+ log.Debugf("setting Number of procs to %d, was %d\n", numProcsDesired, runtime.GOMAXPROCS(2))
+
+}
+
+// See fabric/core/peer/config.go for comments on the configuration caching
+// methodology.
+
+var coreLogger = logging.MustGetLogger("core")
+
+var configurationCached bool
+var securityEnabled bool
+
+// CacheConfiguration caches configuration settings so that reading the yaml
+// file can be avoided on future requests
+func CacheConfiguration() error {
+ securityEnabled = viper.GetBool("security.enabled")
+ configurationCached = true
+ return nil
+}
+
+func cacheConfiguration() {
+ if err := CacheConfiguration(); err != nil {
+ coreLogger.Errorf("Execution continues after CacheConfiguration() failure : %s", err)
+ }
+}
+
+// SecurityEnabled returns true if security is enabled
+func SecurityEnabled() bool {
+ if !configurationCached {
+ cacheConfiguration()
+ }
+ return securityEnabled
+}
diff --git a/core/config/config.go b/core/config/config.go
new file mode 100644
index 00000000000..36dc677c04b
--- /dev/null
+++ b/core/config/config.go
@@ -0,0 +1,77 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "flag"
+ "fmt"
+ "runtime"
+ "strings"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+// Config the config wrapper structure
+type Config struct {
+}
+
+var configLogger = logging.MustGetLogger("config")
+
+func init() {
+
+}
+
+// SetupTestLogging setup the logging during test execution
+func SetupTestLogging() {
+ level, err := logging.LogLevel(viper.GetString("logging.peer"))
+ if err == nil {
+ // No error, use the setting
+ logging.SetLevel(level, "main")
+ logging.SetLevel(level, "server")
+ logging.SetLevel(level, "peer")
+ } else {
+ configLogger.Warningf("Log level not recognized '%s', defaulting to %s: %s", viper.GetString("logging.peer"), logging.ERROR, err)
+ logging.SetLevel(logging.ERROR, "main")
+ logging.SetLevel(logging.ERROR, "server")
+ logging.SetLevel(logging.ERROR, "peer")
+ }
+}
+
+// SetupTestConfig setup the config during test execution
+func SetupTestConfig(pathToOpenchainYaml string) {
+ flag.Parse()
+
+ // Now set the configuration file
+ viper.SetEnvPrefix("HYPERLEDGER")
+ viper.AutomaticEnv()
+ replacer := strings.NewReplacer(".", "_")
+ viper.SetEnvKeyReplacer(replacer)
+ viper.SetConfigName("core") // name of config file (without extension)
+ viper.AddConfigPath(pathToOpenchainYaml) // path to look for the config file in
+ err := viper.ReadInConfig() // Find and read the config file
+ if err != nil { // Handle errors reading the config file
+ panic(fmt.Errorf("Fatal error config file: %s \n", err))
+ }
+
+ SetupTestLogging()
+
+ // Set the number of maxprocs
+ var numProcsDesired = viper.GetInt("peer.gomaxprocs")
+ configLogger.Debugf("setting Number of procs to %d, was %d\n", numProcsDesired, runtime.GOMAXPROCS(2))
+
+}
diff --git a/core/container/ccintf/ccintf.go b/core/container/ccintf/ccintf.go
new file mode 100644
index 00000000000..3a223c5991d
--- /dev/null
+++ b/core/container/ccintf/ccintf.go
@@ -0,0 +1,50 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ccintf
+
+//This package defines the interfaces that support runtime and
+//communication between chaincode and peer (chaincode support).
+//Currently inproccontroller uses it. dockercontroller does not.
+
+import (
+ pb "github.com/hyperledger/fabric/protos"
+ "golang.org/x/net/context"
+)
+
+// ChaincodeStream interface for stream between Peer and chaincode instance.
+type ChaincodeStream interface {
+ Send(*pb.ChaincodeMessage) error
+ Recv() (*pb.ChaincodeMessage, error)
+}
+
+// CCSupport must be implemented by the chaincode support side in peer
+// (such as chaincode_support)
+type CCSupport interface {
+ HandleChaincodeStream(context.Context, ChaincodeStream) error
+}
+
+// GetCCHandlerKey is used to pass CCSupport via context
+func GetCCHandlerKey() string {
+ return "CCHANDLER"
+}
+
+//CCID encapsulates chaincode ID
+type CCID struct {
+ ChaincodeSpec *pb.ChaincodeSpec
+ NetworkID string
+ PeerID string
+}
diff --git a/core/container/config.go b/core/container/config.go
new file mode 100644
index 00000000000..ac53bddeae0
--- /dev/null
+++ b/core/container/config.go
@@ -0,0 +1,76 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package container
+
+import (
+ "flag"
+ "fmt"
+ "runtime"
+ "strings"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+// Config the config wrapper structure
+type Config struct {
+}
+
+func init() {
+
+}
+
+// SetupTestLogging setup the logging during test execution
+func SetupTestLogging() {
+ level, err := logging.LogLevel(viper.GetString("peer.logging.level"))
+ if err == nil {
+ // No error, use the setting
+ logging.SetLevel(level, "main")
+ logging.SetLevel(level, "server")
+ logging.SetLevel(level, "peer")
+ } else {
+ vmLogger.Warningf("Log level not recognized '%s', defaulting to %s: %s", viper.GetString("peer.logging.level"), logging.ERROR, err)
+ logging.SetLevel(logging.ERROR, "main")
+ logging.SetLevel(logging.ERROR, "server")
+ logging.SetLevel(logging.ERROR, "peer")
+ }
+}
+
+// SetupTestConfig setup the config during test execution
+func SetupTestConfig() {
+ flag.Parse()
+
+ // Now set the configuration file
+ viper.SetEnvPrefix("CORE")
+ viper.AutomaticEnv()
+ replacer := strings.NewReplacer(".", "_")
+ viper.SetEnvKeyReplacer(replacer)
+ viper.SetConfigName("core") // name of config file (without extension)
+ viper.AddConfigPath("./") // path to look for the config file in
+ viper.AddConfigPath("./../../peer/") // path to look for the config file in
+ err := viper.ReadInConfig() // Find and read the config file
+ if err != nil { // Handle errors reading the config file
+ panic(fmt.Errorf("Fatal error config file: %s \n", err))
+ }
+
+ SetupTestLogging()
+
+ // Set the number of maxprocs
+ var numProcsDesired = viper.GetInt("peer.gomaxprocs")
+ vmLogger.Debugf("setting Number of procs to %d, was %d\n", numProcsDesired, runtime.GOMAXPROCS(2))
+
+}
diff --git a/core/container/controller.go b/core/container/controller.go
new file mode 100644
index 00000000000..da5276f4aad
--- /dev/null
+++ b/core/container/controller.go
@@ -0,0 +1,277 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package container
+
+import (
+ "fmt"
+ "io"
+ "sync"
+
+ "golang.org/x/net/context"
+
+ "github.com/hyperledger/fabric/core/container/ccintf"
+ "github.com/hyperledger/fabric/core/container/dockercontroller"
+ "github.com/hyperledger/fabric/core/container/inproccontroller"
+)
+
+//abstract virtual image for supporting arbitrary virtual machines
+type vm interface {
+ Deploy(ctxt context.Context, ccid ccintf.CCID, args []string, env []string, attachstdin bool, attachstdout bool, reader io.Reader) error
+ Start(ctxt context.Context, ccid ccintf.CCID, args []string, env []string, attachstdin bool, attachstdout bool, reader io.Reader) error
+ Stop(ctxt context.Context, ccid ccintf.CCID, timeout uint, dontkill bool, dontremove bool) error
+ Destroy(ctxt context.Context, ccid ccintf.CCID, force bool, noprune bool) error
+ GetVMName(ccID ccintf.CCID) (string, error)
+}
+
+type refCountedLock struct {
+ refCount int
+ lock *sync.RWMutex
+}
+
+//VMController - manages VMs
+// . abstract construction of different types of VMs (we only care about Docker for now)
+// . manage lifecycle of VM (start with build, start, stop ...
+// eventually probably need fine grained management)
+type VMController struct {
+ sync.RWMutex
+ // Handlers for each chaincode
+ containerLocks map[string]*refCountedLock
+}
+
+//singleton...access through VMCProcess
+var vmcontroller *VMController
+
+//constants for supported containers
+const (
+ DOCKER = "Docker"
+ SYSTEM = "System"
+)
+
+//init creates/initializes the singleton VMController
+func init() {
+ vmcontroller = new(VMController)
+ vmcontroller.containerLocks = make(map[string]*refCountedLock)
+}
+
+func (vmc *VMController) newVM(typ string) vm {
+ var (
+ v vm
+ )
+
+ switch typ {
+ case DOCKER:
+ v = &dockercontroller.DockerVM{}
+ case SYSTEM:
+ v = &inproccontroller.InprocVM{}
+ default:
+ v = &dockercontroller.DockerVM{}
+ }
+ return v
+}
+
+func (vmc *VMController) lockContainer(id string) {
+ //get the container lock under global lock
+ vmcontroller.Lock()
+ var refLck *refCountedLock
+ var ok bool
+ if refLck, ok = vmcontroller.containerLocks[id]; !ok {
+ refLck = &refCountedLock{refCount: 1, lock: &sync.RWMutex{}}
+ vmcontroller.containerLocks[id] = refLck
+ } else {
+ refLck.refCount++
+ vmLogger.Debugf("refcount %d (%s)", refLck.refCount, id)
+ }
+ vmcontroller.Unlock()
+ vmLogger.Debugf("waiting for container(%s) lock", id)
+ refLck.lock.Lock()
+ vmLogger.Debugf("got container (%s) lock", id)
+}
+
+func (vmc *VMController) unlockContainer(id string) {
+ vmcontroller.Lock()
+ if refLck, ok := vmcontroller.containerLocks[id]; ok {
+ if refLck.refCount <= 0 {
+ panic("refcnt <= 0")
+ }
+ refLck.lock.Unlock()
+ if refLck.refCount--; refLck.refCount == 0 {
+ vmLogger.Debugf("container lock deleted(%s)", id)
+ delete(vmcontroller.containerLocks, id)
+ }
+ } else {
+ vmLogger.Debugf("no lock to unlock(%s)!!", id)
+ }
+ vmcontroller.Unlock()
+}
+
+//VMCReqIntf - all requests should implement this interface.
+//The context should be passed and tested at each layer till we stop
+//note that we'd stop on the first method on the stack that does not
+//take context
+type VMCReqIntf interface {
+ do(ctxt context.Context, v vm) VMCResp
+ getCCID() ccintf.CCID
+}
+
+//VMCResp - response from requests. Resp field is an anonymous interface.
+//It can hold any response. err should be tested first
+type VMCResp struct {
+ Err error
+ Resp interface{}
+}
+
+//CreateImageReq - properties for creating a container image
+type CreateImageReq struct {
+ ccintf.CCID
+ Reader io.Reader
+ AttachStdin bool
+ AttachStdout bool
+ Args []string
+ Env []string
+}
+
+func (bp CreateImageReq) do(ctxt context.Context, v vm) VMCResp {
+ var resp VMCResp
+
+ if err := v.Deploy(ctxt, bp.CCID, bp.Args, bp.Env, bp.AttachStdin, bp.AttachStdout, bp.Reader); err != nil {
+ resp = VMCResp{Err: err}
+ } else {
+ resp = VMCResp{}
+ }
+
+ return resp
+}
+
+func (bp CreateImageReq) getCCID() ccintf.CCID {
+ return bp.CCID
+}
+
+//StartImageReq - properties for starting a container.
+type StartImageReq struct {
+ ccintf.CCID
+ Reader io.Reader
+ Args []string
+ Env []string
+ AttachStdin bool
+ AttachStdout bool
+}
+
+func (si StartImageReq) do(ctxt context.Context, v vm) VMCResp {
+ var resp VMCResp
+
+ if err := v.Start(ctxt, si.CCID, si.Args, si.Env, si.AttachStdin, si.AttachStdout, si.Reader); err != nil {
+ resp = VMCResp{Err: err}
+ } else {
+ resp = VMCResp{}
+ }
+
+ return resp
+}
+
+func (si StartImageReq) getCCID() ccintf.CCID {
+ return si.CCID
+}
+
+//StopImageReq - properties for stopping a container.
+type StopImageReq struct {
+ ccintf.CCID
+ Timeout uint
+ //by default we will kill the container after stopping
+ Dontkill bool
+ //by default we will remove the container after killing
+ Dontremove bool
+}
+
+func (si StopImageReq) do(ctxt context.Context, v vm) VMCResp {
+ var resp VMCResp
+
+ if err := v.Stop(ctxt, si.CCID, si.Timeout, si.Dontkill, si.Dontremove); err != nil {
+ resp = VMCResp{Err: err}
+ } else {
+ resp = VMCResp{}
+ }
+
+ return resp
+}
+
+func (si StopImageReq) getCCID() ccintf.CCID {
+ return si.CCID
+}
+
+//DestroyImageReq - properties for destroying a container image.
+type DestroyImageReq struct {
+ ccintf.CCID
+ Timeout uint
+ Force bool
+ NoPrune bool
+}
+
+func (di DestroyImageReq) do(ctxt context.Context, v vm) VMCResp {
+ var resp VMCResp
+
+ if err := v.Destroy(ctxt, di.CCID, di.Force, di.NoPrune); err != nil {
+ resp = VMCResp{Err: err}
+ } else {
+ resp = VMCResp{}
+ }
+
+ return resp
+}
+
+func (di DestroyImageReq) getCCID() ccintf.CCID {
+ return di.CCID
+}
+
+//VMCProcess should be used as follows
+// . construct a context
+// . construct req of the right type (e.g., CreateImageReq)
+// . call it in a go routine
+// . process response in the goroutine
+//context can be cancelled. VMCProcess will try to cancel calling functions if it can
+//For instance Docker client APIs such as BuildImage are not cancelable.
+//In all cases VMCProcess will wait for the called go routine to return
+func VMCProcess(ctxt context.Context, vmtype string, req VMCReqIntf) (interface{}, error) {
+ v := vmcontroller.newVM(vmtype)
+
+ if v == nil {
+ return nil, fmt.Errorf("Unknown VM type %s", vmtype)
+ }
+
+ c := make(chan struct{})
+ var resp interface{}
+ go func() {
+ defer close(c)
+
+ id, err := v.GetVMName(req.getCCID())
+ if err != nil {
+ resp = VMCResp{Err: err}
+ return
+ }
+ vmcontroller.lockContainer(id)
+ resp = req.do(ctxt, v)
+ vmcontroller.unlockContainer(id)
+ }()
+
+ select {
+ case <-c:
+ return resp, nil
+ case <-ctxt.Done():
+ //TODO cancel req.do ... (needed) ?
+ <-c
+ return nil, ctxt.Err()
+ }
+}
diff --git a/core/container/controller_test.go b/core/container/controller_test.go
new file mode 100644
index 00000000000..bb6f2ee95dc
--- /dev/null
+++ b/core/container/controller_test.go
@@ -0,0 +1,272 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package container
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "testing"
+ "time"
+
+ "github.com/hyperledger/fabric/core/container/ccintf"
+ pb "github.com/hyperledger/fabric/protos"
+
+ "golang.org/x/net/context"
+)
+
+/**** not using actual files from file system for testing.... use these funcs if we want to do that
+func getCodeChainBytes(pathtocodechain string) (io.Reader, error) {
+ inputbuf := bytes.NewBuffer(nil)
+ gw := gzip.NewWriter(inputbuf)
+ tr := tar.NewWriter(gw)
+ // Get the Tar contents for the image
+ err := writeCodeChainTar(pathtocodechain, tr)
+ tr.Close()
+ gw.Close()
+ if err != nil {
+ return nil, errors.New(fmt.Sprintf("Error getting codechain tar: %s", err))
+ }
+ ioutil.WriteFile("/tmp/chaincode.tar", inputbuf.Bytes(), 0644)
+ return inputbuf, nil
+}
+
+func writeCodeChainTar(pathtocodechain string, tw *tar.Writer) error {
+ root_directory := pathtocodechain //use full path
+ fmt.Printf("tar %s start(%s)\n", root_directory, time.Now())
+
+ walkFn := func(path string, info os.FileInfo, err error) error {
+ fmt.Printf("path %s(%s)\n", path, info.Name())
+ if info == nil {
+ return errors.New(fmt.Sprintf("Error walking the path: %s", path))
+ }
+
+ if info.Mode().IsDir() {
+ return nil
+ }
+ // Because of scoping we can reference the external root_directory variable
+ //new_path := fmt.Sprintf("%s", path[len(root_directory):])
+ new_path := info.Name()
+
+ if len(new_path) == 0 {
+ return nil
+ }
+
+ fr, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer fr.Close()
+
+ if h, err := tar.FileInfoHeader(info, new_path); err != nil {
+ fmt.Printf(fmt.Sprintf("Error getting FileInfoHeader: %s\n", err))
+ return err
+ } else {
+ h.Name = new_path
+ if err = tw.WriteHeader(h); err != nil {
+ fmt.Printf(fmt.Sprintf("Error writing header: %s\n", err))
+ return err
+ }
+ }
+ if length, err := io.Copy(tw, fr); err != nil {
+ return err
+ } else {
+ fmt.Printf("Length of entry = %d\n", length)
+ }
+ return nil
+ }
+
+ if err := filepath.Walk(root_directory, walkFn); err != nil {
+ fmt.Printf("Error walking root_directory: %s\n", err)
+ return err
+ } else {
+ // Write the tar file out
+ if err := tw.Close(); err != nil {
+ return err
+ }
+ }
+ fmt.Printf("tar end = %s\n", time.Now())
+ return nil
+}
+*********************/
+
+func getCodeChainBytesInMem() (io.Reader, error) {
+ startTime := time.Now()
+ inputbuf := bytes.NewBuffer(nil)
+ gw := gzip.NewWriter(inputbuf)
+ tr := tar.NewWriter(gw)
+ dockerFileContents := []byte("FROM busybox:latest\n\nCMD echo hello")
+ dockerFileSize := int64(len([]byte(dockerFileContents)))
+
+ tr.WriteHeader(&tar.Header{Name: "Dockerfile", Size: dockerFileSize, ModTime: startTime, AccessTime: startTime, ChangeTime: startTime})
+ tr.Write([]byte(dockerFileContents))
+ tr.Close()
+ gw.Close()
+ ioutil.WriteFile("/tmp/chaincode.tar", inputbuf.Bytes(), 0644)
+ return inputbuf, nil
+}
+
+//set to true by providing "-run-controller-tests" command line option... Tests will create a docker image called "simple"
+var runTests bool
+
+func testForSkip(t *testing.T) {
+ //run tests
+ if !runTests {
+ t.SkipNow()
+ }
+}
+
+func TestVMCBuildImage(t *testing.T) {
+ testForSkip(t)
+ var ctxt = context.Background()
+
+ //get the tarball for codechain
+ tarRdr, err := getCodeChainBytesInMem()
+ if err != nil {
+ t.Fail()
+ t.Logf("Error reading tar file: %s", err)
+ return
+ }
+
+ c := make(chan struct{})
+
+	//create a CreateImageReq obj and send it to VMCProcess
+ go func() {
+ defer close(c)
+ cir := CreateImageReq{CCID: ccintf.CCID{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeID: &pb.ChaincodeID{Name: "simple"}}}, Reader: tarRdr, AttachStdout: true}
+ _, err := VMCProcess(ctxt, "Docker", cir)
+ if err != nil {
+ t.Fail()
+ t.Logf("Error creating image: %s", err)
+ return
+ }
+ }()
+
+ //wait for VMController to complete.
+ fmt.Println("VMCBuildImage-waiting for response")
+ <-c
+}
+
+func TestVMCStartContainer(t *testing.T) {
+ testForSkip(t)
+
+ var ctxt = context.Background()
+
+ c := make(chan struct{})
+
+ //create a StartImageReq obj and send it to VMCProcess
+ go func() {
+ defer close(c)
+ sir := StartImageReq{CCID: ccintf.CCID{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeID: &pb.ChaincodeID{Name: "simple"}}}}
+ _, err := VMCProcess(ctxt, "Docker", sir)
+ if err != nil {
+ t.Fail()
+ t.Logf("Error starting container: %s", err)
+ return
+ }
+ }()
+
+ //wait for VMController to complete.
+ fmt.Println("VMCStartContainer-waiting for response")
+ <-c
+ stopr := StopImageReq{CCID: ccintf.CCID{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeID: &pb.ChaincodeID{Name: "simple"}}}, Timeout: 0, Dontremove: true}
+ VMCProcess(ctxt, "Docker", stopr)
+}
+
+func TestVMCCreateAndStartContainer(t *testing.T) {
+ testForSkip(t)
+
+ var ctxt = context.Background()
+
+ c := make(chan struct{})
+
+ //create a StartImageReq obj and send it to VMCProcess
+ go func() {
+ defer close(c)
+
+ //stop and delete the container first (if it exists)
+ stopir := StopImageReq{CCID: ccintf.CCID{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeID: &pb.ChaincodeID{Name: "simple"}}}, Timeout: 0}
+ VMCProcess(ctxt, "Docker", stopir)
+
+ startir := StartImageReq{CCID: ccintf.CCID{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeID: &pb.ChaincodeID{Name: "simple"}}}}
+ r, err := VMCProcess(ctxt, "Docker", startir)
+ if err != nil {
+ t.Fail()
+ t.Logf("Error starting container: %s", err)
+ return
+ }
+ vmcresp, ok := r.(VMCResp)
+ if !ok {
+ t.Fatalf("invalid response from VMCProcess")
+ }
+ if vmcresp.Err != nil {
+ t.Fail()
+ t.Logf("docker error starting container: %s", vmcresp.Err)
+ return
+ }
+ }()
+
+ //wait for VMController to complete.
+ fmt.Println("VMCStartContainer-waiting for response")
+ <-c
+ //stopr := StopImageReq{ID: "simple", Timeout: 0, Dontremove: true}
+ //VMCProcess(ctxt, "Docker", stopr)
+}
+
+func TestVMCSyncStartContainer(t *testing.T) {
+ testForSkip(t)
+
+ var ctxt = context.Background()
+
+	//create a StartImageReq obj and send it to VMCProcess
+ sir := StartImageReq{CCID: ccintf.CCID{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeID: &pb.ChaincodeID{Name: "simple"}}}}
+ _, err := VMCProcess(ctxt, "Docker", sir)
+ if err != nil {
+ t.Fail()
+ t.Logf("Error starting container: %s", err)
+ return
+ }
+ stopr := StopImageReq{CCID: ccintf.CCID{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeID: &pb.ChaincodeID{Name: "simple"}}}, Timeout: 0, Dontremove: true}
+ VMCProcess(ctxt, "Docker", stopr)
+}
+
+func TestVMCStopContainer(t *testing.T) {
+ testForSkip(t)
+
+ var ctxt = context.Background()
+
+ c := make(chan struct{})
+
+	//create a StopImageReq obj and send it to VMCProcess
+ go func() {
+ defer close(c)
+ sir := StopImageReq{CCID: ccintf.CCID{ChaincodeSpec: &pb.ChaincodeSpec{ChaincodeID: &pb.ChaincodeID{Name: "simple"}}}, Timeout: 0}
+ _, err := VMCProcess(ctxt, "Docker", sir)
+ if err != nil {
+ t.Fail()
+ t.Logf("Error stopping container: %s", err)
+ return
+ }
+ }()
+
+ //wait for VMController to complete.
+ fmt.Println("VMCStopContainer-waiting for response")
+ <-c
+}
diff --git a/core/container/dockercontroller/dockercontroller.go b/core/container/dockercontroller/dockercontroller.go
new file mode 100644
index 00000000000..79dc1bc0977
--- /dev/null
+++ b/core/container/dockercontroller/dockercontroller.go
@@ -0,0 +1,279 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dockercontroller
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/fsouza/go-dockerclient"
+ "github.com/hyperledger/fabric/core/container/ccintf"
+ cutil "github.com/hyperledger/fabric/core/container/util"
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+ "golang.org/x/net/context"
+)
+
+var (
+	// dockerLogger is the package-level logger for the docker controller.
+	dockerLogger = logging.MustGetLogger("dockercontroller")
+	// hostConfig caches the HostConfig built from viper configuration on
+	// first use by getDockerHostConfig; nil until then.
+	hostConfig *docker.HostConfig
+)
+
+//DockerVM is a vm. It is identified by an image id
+type DockerVM struct {
+	id string // image identifier; NOTE(review): appears unused in this file — confirm before removing
+}
+
+// getDockerHostConfig builds (once) and returns the docker HostConfig used
+// for chaincode containers, populated from "vm.docker.hostConfig.*" viper
+// keys. NOTE(review): the lazy caching below is not synchronized — confirm
+// callers are single-goroutine or accept a benign duplicate build.
+func getDockerHostConfig() *docker.HostConfig {
+	if hostConfig != nil {
+		return hostConfig
+	}
+	// dockerKey prefixes a short key with the viper config namespace.
+	dockerKey := func(key string) string {
+		return "vm.docker.hostConfig." + key
+	}
+	// getInt64 reads an int-valued key; recover guards against a panic from
+	// viper's type cast on malformed configuration values.
+	getInt64 := func(key string) int64 {
+		defer func() {
+			if err := recover(); err != nil {
+				dockerLogger.Warningf("load vm.docker.hostConfig.%s failed, error: %v", key, err)
+			}
+		}()
+		n := viper.GetInt(dockerKey(key))
+		return int64(n)
+	}
+
+	var logConfig docker.LogConfig
+	err := viper.UnmarshalKey(dockerKey("LogConfig"), &logConfig)
+	if err != nil {
+		dockerLogger.Warningf("load docker HostConfig.LogConfig failed, error: %s", err.Error())
+	}
+	// Default to host networking when no NetworkMode is configured.
+	networkMode := viper.GetString(dockerKey("NetworkMode"))
+	if networkMode == "" {
+		networkMode = "host"
+	}
+	dockerLogger.Debugf("docker container hostconfig NetworkMode: %s", networkMode)
+
+	// Map each configuration key onto the corresponding HostConfig field.
+	hostConfig = &docker.HostConfig{
+		CapAdd:  viper.GetStringSlice(dockerKey("CapAdd")),
+		CapDrop: viper.GetStringSlice(dockerKey("CapDrop")),
+
+		DNS:         viper.GetStringSlice(dockerKey("Dns")),
+		DNSSearch:   viper.GetStringSlice(dockerKey("DnsSearch")),
+		ExtraHosts:  viper.GetStringSlice(dockerKey("ExtraHosts")),
+		NetworkMode: networkMode,
+		IpcMode:     viper.GetString(dockerKey("IpcMode")),
+		PidMode:     viper.GetString(dockerKey("PidMode")),
+		UTSMode:     viper.GetString(dockerKey("UTSMode")),
+		LogConfig:   logConfig,
+
+		ReadonlyRootfs:   viper.GetBool(dockerKey("ReadonlyRootfs")),
+		SecurityOpt:      viper.GetStringSlice(dockerKey("SecurityOpt")),
+		CgroupParent:     viper.GetString(dockerKey("CgroupParent")),
+		Memory:           getInt64("Memory"),
+		MemorySwap:       getInt64("MemorySwap"),
+		MemorySwappiness: getInt64("MemorySwappiness"),
+		OOMKillDisable:   viper.GetBool(dockerKey("OomKillDisable")),
+		CPUShares:        getInt64("CpuShares"),
+		CPUSet:           viper.GetString(dockerKey("Cpuset")),
+		CPUSetCPUs:       viper.GetString(dockerKey("CpusetCPUs")),
+		CPUSetMEMs:       viper.GetString(dockerKey("CpusetMEMs")),
+		CPUQuota:         getInt64("CpuQuota"),
+		CPUPeriod:        getInt64("CpuPeriod"),
+		BlkioWeight:      getInt64("BlkioWeight"),
+	}
+
+	return hostConfig
+}
+
+// createContainer creates (but does not start) a docker container named
+// containerID from imageID, passing the command args and environment through.
+func (vm *DockerVM) createContainer(ctxt context.Context, client *docker.Client, imageID string, containerID string, args []string, env []string, attachstdin bool, attachstdout bool) error {
+	config := docker.Config{Cmd: args, Image: imageID, Env: env, AttachStdin: attachstdin, AttachStdout: attachstdout}
+	copts := docker.CreateContainerOptions{Name: containerID, Config: &config, HostConfig: getDockerHostConfig()}
+	dockerLogger.Debugf("Create container: %s", containerID)
+	_, err := client.CreateContainer(copts)
+	if err != nil {
+		return err
+	}
+	// Fix: log the container id here (previously logged the image id),
+	// matching the "Create container" message above.
+	dockerLogger.Debugf("Created container: %s", containerID)
+	return nil
+}
+
+// deployImage builds a docker image named after the chaincode id from the
+// tar stream in reader; build output is captured and logged on failure.
+func (vm *DockerVM) deployImage(client *docker.Client, ccid ccintf.CCID, args []string, env []string, attachstdin bool, attachstdout bool, reader io.Reader) error {
+	// Fix: the error from GetVMName was previously discarded.
+	id, err := vm.GetVMName(ccid)
+	if err != nil {
+		return err
+	}
+	outputbuf := bytes.NewBuffer(nil)
+	opts := docker.BuildImageOptions{
+		Name:         id,
+		Pull:         false,
+		InputStream:  reader,
+		OutputStream: outputbuf,
+	}
+
+	if err := client.BuildImage(opts); err != nil {
+		dockerLogger.Errorf("Error building images: %s", err)
+		dockerLogger.Errorf("Image Output:\n********************\n%s\n********************", outputbuf.String())
+		return err
+	}
+
+	dockerLogger.Debugf("Created image: %s", id)
+
+	return nil
+}
+
+//Deploy use the reader containing targz to create a docker image
+//for docker inputbuf is tar reader ready for use by docker.Client
+//the stream from end client to peer could directly be this tar stream
+//talk to docker daemon using docker Client and build the image
+func (vm *DockerVM) Deploy(ctxt context.Context, ccid ccintf.CCID, args []string, env []string, attachstdin bool, attachstdout bool, reader io.Reader) error {
+	client, err := cutil.NewDockerClient()
+	if err != nil {
+		return fmt.Errorf("Error creating docker client: %s", err)
+	}
+	return vm.deployImage(client, ccid, args, env, attachstdin, attachstdout, reader)
+}
+
+//Start starts a container using a previously created docker image
+func (vm *DockerVM) Start(ctxt context.Context, ccid ccintf.CCID, args []string, env []string, attachstdin bool, attachstdout bool, reader io.Reader) error {
+	imageID, _ := vm.GetVMName(ccid)
+	client, err := cutil.NewDockerClient()
+	if err != nil {
+		dockerLogger.Debugf("start - cannot create client %s", err)
+		return err
+	}
+
+	// Container names may not contain ":" (present in image tags).
+	containerID := strings.Replace(imageID, ":", "_", -1)
+
+	//stop,force remove if necessary
+	dockerLogger.Debugf("Cleanup container %s", containerID)
+	vm.stopInternal(ctxt, client, containerID, 0, false, false)
+
+	dockerLogger.Debugf("Start container %s", containerID)
+	err = vm.createContainer(ctxt, client, imageID, containerID, args, env, attachstdin, attachstdout)
+	if err != nil {
+		//if image not found try to create image and retry
+		if err == docker.ErrNoSuchImage {
+			if reader != nil {
+				dockerLogger.Debugf("start-could not find image ...attempt to recreate image %s", err)
+				if err = vm.deployImage(client, ccid, args, env, attachstdin, attachstdout, reader); err != nil {
+					return err
+				}
+
+				dockerLogger.Debug("start-recreated image successfully")
+				if err = vm.createContainer(ctxt, client, imageID, containerID, args, env, attachstdin, attachstdout); err != nil {
+					dockerLogger.Errorf("start-could not recreate container post recreate image: %s", err)
+					return err
+				}
+			} else {
+				// No build context supplied, so the image cannot be rebuilt.
+				dockerLogger.Errorf("start-could not find image: %s", err)
+				return err
+			}
+		} else {
+			dockerLogger.Errorf("start-could not recreate container %s", err)
+			return err
+		}
+	}
+
+	// Baohua: getDockerHostConfig() will be ignored when communicating with docker API 1.24+.
+	// I keep it here for a short-term compatibility.
+	// See https://goo.gl/ZvtkKm for more details.
+	err = client.StartContainer(containerID, getDockerHostConfig())
+	if err != nil {
+		dockerLogger.Errorf("start-could not start container %s", err)
+		return err
+	}
+
+	dockerLogger.Debugf("Started container %s", containerID)
+	return nil
+}
+
+//Stop stops a running chaincode
+func (vm *DockerVM) Stop(ctxt context.Context, ccid ccintf.CCID, timeout uint, dontkill bool, dontremove bool) error {
+	id, _ := vm.GetVMName(ccid)
+	client, err := cutil.NewDockerClient()
+	if err != nil {
+		// Fix: this is Stop, not Start — the log previously said "start".
+		dockerLogger.Debugf("stop - cannot create client %s", err)
+		return err
+	}
+	// Container names may not contain ":"; mirror the mangling done in Start.
+	id = strings.Replace(id, ":", "_", -1)
+
+	return vm.stopInternal(ctxt, client, id, timeout, dontkill, dontremove)
+}
+
+// stopInternal stops, kills (unless dontkill) and removes (unless dontremove)
+// the named container. Failures of individual steps are logged at debug level
+// only; the error returned is from the last operation attempted.
+func (vm *DockerVM) stopInternal(ctxt context.Context, client *docker.Client, id string, timeout uint, dontkill bool, dontremove bool) error {
+	err := client.StopContainer(id, timeout)
+	if err != nil {
+		dockerLogger.Debugf("Stop container %s(%s)", id, err)
+	} else {
+		dockerLogger.Debugf("Stopped container %s", id)
+	}
+	if !dontkill {
+		err = client.KillContainer(docker.KillContainerOptions{ID: id})
+		if err != nil {
+			dockerLogger.Debugf("Kill container %s (%s)", id, err)
+		} else {
+			dockerLogger.Debugf("Killed container %s", id)
+		}
+	}
+	if !dontremove {
+		// Force removal so a stopped-but-present container does not block
+		// a subsequent create with the same name.
+		err = client.RemoveContainer(docker.RemoveContainerOptions{ID: id, Force: true})
+		if err != nil {
+			dockerLogger.Debugf("Remove container %s (%s)", id, err)
+		} else {
+			dockerLogger.Debugf("Removed container %s", id)
+		}
+	}
+	return err
+}
+
+//Destroy destroys an image
+func (vm *DockerVM) Destroy(ctxt context.Context, ccid ccintf.CCID, force bool, noprune bool) error {
+	id, _ := vm.GetVMName(ccid)
+	client, err := cutil.NewDockerClient()
+	if err != nil {
+		dockerLogger.Errorf("destroy-cannot create client %s", err)
+		return err
+	}
+	// Mirror the ":" mangling used by Start/Stop.
+	id = strings.Replace(id, ":", "_", -1)
+
+	err = client.RemoveImageExtended(id, docker.RemoveImageOptions{Force: force, NoPrune: noprune})
+
+	if err != nil {
+		dockerLogger.Errorf("error while destroying image: %s", err)
+	} else {
+		// Fix: Debug does not interpret format verbs — use Debugf so the
+		// image id is actually substituted into the message.
+		dockerLogger.Debugf("Destroyed image %s", id)
+	}
+
+	return err
+}
+
+//GetVMName generates the docker image from peer information given the hashcode. This is needed to
+//keep image name's unique in a single host, multi-peer environment (such as a development environment)
+func (vm *DockerVM) GetVMName(ccid ccintf.CCID) (string, error) {
+	name := ccid.ChaincodeSpec.ChaincodeID.Name
+	switch {
+	case ccid.NetworkID != "":
+		// Fully-qualified: network, peer and chaincode name.
+		return fmt.Sprintf("%s-%s-%s", ccid.NetworkID, ccid.PeerID, name), nil
+	case ccid.PeerID != "":
+		// Peer-scoped: peer and chaincode name.
+		return fmt.Sprintf("%s-%s", ccid.PeerID, name), nil
+	default:
+		return name, nil
+	}
+}
diff --git a/core/container/dockercontroller/dockercontroller_test.go b/core/container/dockercontroller/dockercontroller_test.go
new file mode 100644
index 00000000000..6e4b5e3f867
--- /dev/null
+++ b/core/container/dockercontroller/dockercontroller_test.go
@@ -0,0 +1,56 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dockercontroller
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/spf13/viper"
+
+ "github.com/fsouza/go-dockerclient"
+ "github.com/hyperledger/fabric/core/config"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+func TestHostConfig(t *testing.T) {
+ config.SetupTestConfig("./../../../peer")
+ var hostConfig = new(docker.HostConfig)
+ err := viper.UnmarshalKey("vm.docker.hostConfig", hostConfig)
+ if err != nil {
+ t.Fatalf("Load docker HostConfig wrong, error: %s", err.Error())
+ }
+ testutil.AssertNotEquals(t, hostConfig.LogConfig, nil)
+ testutil.AssertEquals(t, hostConfig.LogConfig.Type, "json-file")
+ testutil.AssertEquals(t, hostConfig.LogConfig.Config["max-size"], "50m")
+ testutil.AssertEquals(t, hostConfig.LogConfig.Config["max-file"], "5")
+}
+
+// TestGetDockerHostConfig checks that environment overrides are reflected in
+// the HostConfig built by getDockerHostConfig.
+func TestGetDockerHostConfig(t *testing.T) {
+	os.Setenv("HYPERLEDGER_VM_DOCKER_HOSTCONFIG_NETWORKMODE", "overlay")
+	os.Setenv("HYPERLEDGER_VM_DOCKER_HOSTCONFIG_CPUSHARES", fmt.Sprint(1024*1024*1024*2))
+	// Fix: restore the environment so these overrides do not leak into
+	// other tests in the package.
+	defer os.Unsetenv("HYPERLEDGER_VM_DOCKER_HOSTCONFIG_NETWORKMODE")
+	defer os.Unsetenv("HYPERLEDGER_VM_DOCKER_HOSTCONFIG_CPUSHARES")
+	config.SetupTestConfig("./../../../peer")
+	hostConfig := getDockerHostConfig()
+	testutil.AssertNotNil(t, hostConfig)
+	testutil.AssertEquals(t, hostConfig.NetworkMode, "overlay")
+	testutil.AssertEquals(t, hostConfig.LogConfig.Type, "json-file")
+	testutil.AssertEquals(t, hostConfig.LogConfig.Config["max-size"], "50m")
+	testutil.AssertEquals(t, hostConfig.LogConfig.Config["max-file"], "5")
+	testutil.AssertEquals(t, hostConfig.Memory, int64(1024*1024*1024*2))
+	testutil.AssertEquals(t, hostConfig.CPUShares, int64(1024*1024*1024*2))
+}
diff --git a/core/container/inproccontroller/inproccontroller.go b/core/container/inproccontroller/inproccontroller.go
new file mode 100644
index 00000000000..8d3da3c4b89
--- /dev/null
+++ b/core/container/inproccontroller/inproccontroller.go
@@ -0,0 +1,221 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package inproccontroller
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/hyperledger/fabric/core/chaincode/shim"
+ "github.com/hyperledger/fabric/core/container/ccintf"
+ pb "github.com/hyperledger/fabric/protos"
+ "github.com/op/go-logging"
+
+ "golang.org/x/net/context"
+)
+
+// inprocContainer holds one in-process ("system") chaincode: the chaincode
+// implementation plus the launch arguments and a channel used to stop it.
+type inprocContainer struct {
+	chaincode shim.Chaincode // the registered chaincode implementation
+	running   bool           // set once Start has launched the goroutines
+	args      []string       // default args used when Start passes nil
+	env       []string       // default env used when Start passes nil
+	stopChan  chan struct{}  // signalled by Stop to shut the instance down
+}
+
+var (
+	inprocLogger = logging.MustGetLogger("inproccontroller")
+	// typeRegistry maps chaincode path -> registered template (see Register).
+	typeRegistry = make(map[string]*inprocContainer)
+	// instRegistry maps chaincode name -> running/created instance.
+	instRegistry = make(map[string]*inprocContainer)
+)
+
+//Register registers system chaincode with given path. The deploy should be called to initialize
+func Register(path string, cc shim.Chaincode) error {
+	if typeRegistry[path] != nil {
+		// Fix: fmt.Errorf(fmt.Sprintf(...)) double-formats the message and
+		// would mis-handle any "%" in path; format directly.
+		return fmt.Errorf("%s is registered", path)
+	}
+
+	typeRegistry[path] = &inprocContainer{chaincode: cc}
+	return nil
+}
+
+//InprocVM is a vm. It is identified by a executable name
+type InprocVM struct {
+	id string // NOTE(review): appears unused in this file — confirm before removing
+}
+
+// getInstance returns the existing in-process instance for the chaincode
+// name, or creates one from the template and registers it.
+func (vm *InprocVM) getInstance(ctxt context.Context, ipctemplate *inprocContainer, ccid ccintf.CCID, args []string, env []string) (*inprocContainer, error) {
+	name := ccid.ChaincodeSpec.ChaincodeID.Name
+	if existing := instRegistry[name]; existing != nil {
+		inprocLogger.Warningf("chaincode instance exists for %s", name)
+		return existing, nil
+	}
+	instance := &inprocContainer{args: args, env: env, chaincode: ipctemplate.chaincode, stopChan: make(chan struct{})}
+	instRegistry[name] = instance
+	inprocLogger.Debugf("chaincode instance created for %s", name)
+	return instance, nil
+}
+
+//Deploy verifies chaincode is registered and creates an instance for it. Currently only one instance can be created
+func (vm *InprocVM) Deploy(ctxt context.Context, ccid ccintf.CCID, args []string, env []string, attachstdin bool, attachstdout bool, reader io.Reader) error {
+	path := ccid.ChaincodeSpec.ChaincodeID.Path
+
+	ipctemplate := typeRegistry[path]
+	if ipctemplate == nil {
+		// Fix: fmt.Errorf(fmt.Sprintf(...)) double-formats; format directly.
+		return fmt.Errorf("%s not registered. Please register the system chaincode in inprocinstances.go", path)
+	}
+
+	if ipctemplate.chaincode == nil {
+		return fmt.Errorf("%s system chaincode does not contain chaincode instance", path)
+	}
+
+	_, err := vm.getInstance(ctxt, ipctemplate, ccid, args, env)
+
+	//FUTURE ... here is where we might check code for safety
+	inprocLogger.Debugf("registered : %s", path)
+
+	return err
+}
+
+// launchInProc runs the chaincode side and the peer's chaincode-support side
+// of an in-process chaincode as two goroutines connected by a channel pair,
+// then blocks until either side finishes or a stop is requested.
+func (ipc *inprocContainer) launchInProc(ctxt context.Context, id string, args []string, env []string, ccSupport ccintf.CCSupport) error {
+	peerRcvCCSend := make(chan *pb.ChaincodeMessage)
+	ccRcvPeerSend := make(chan *pb.ChaincodeMessage)
+	// NOTE(review): both goroutines below declare their own err with :=, so
+	// this outer err appears to always be returned as nil — confirm intended.
+	var err error
+	ccchan := make(chan struct{}, 1)
+	ccsupportchan := make(chan struct{}, 1)
+	go func() {
+		defer close(ccchan)
+		inprocLogger.Debugf("chaincode started for %s", id)
+		// Fall back to the instance's registered args/env when none given.
+		if args == nil {
+			args = ipc.args
+		}
+		if env == nil {
+			env = ipc.env
+		}
+		err := shim.StartInProc(env, args, ipc.chaincode, ccRcvPeerSend, peerRcvCCSend)
+		if err != nil {
+			err = fmt.Errorf("chaincode-support ended with err: %s", err)
+			inprocLogger.Errorf("%s", err)
+		}
+		inprocLogger.Debugf("chaincode ended with for %s with err: %s", id, err)
+	}()
+
+	go func() {
+		defer close(ccsupportchan)
+		inprocStream := newInProcStream(peerRcvCCSend, ccRcvPeerSend)
+		inprocLogger.Debugf("chaincode-support started for %s", id)
+		err := ccSupport.HandleChaincodeStream(ctxt, inprocStream)
+		if err != nil {
+			err = fmt.Errorf("chaincode ended with err: %s", err)
+			inprocLogger.Errorf("%s", err)
+		}
+		inprocLogger.Debugf("chaincode-support ended with for %s with err: %s", id, err)
+	}()
+
+	// Whichever side exits first, close the channel it was receiving on so
+	// the other side unblocks; a stop request closes both.
+	select {
+	case <-ccchan:
+		close(peerRcvCCSend)
+		inprocLogger.Debugf("chaincode %s quit", id)
+	case <-ccsupportchan:
+		close(ccRcvPeerSend)
+		inprocLogger.Debugf("chaincode support %s quit", id)
+	case <-ipc.stopChan:
+		close(ccRcvPeerSend)
+		close(peerRcvCCSend)
+		inprocLogger.Debugf("chaincode %s stopped", id)
+	}
+
+	return err
+}
+
+//Start starts a previously registered system codechain
+func (vm *InprocVM) Start(ctxt context.Context, ccid ccintf.CCID, args []string, env []string, attachstdin bool, attachstdout bool, reader io.Reader) error {
+	path := ccid.ChaincodeSpec.ChaincodeID.Path
+
+	ipctemplate := typeRegistry[path]
+	if ipctemplate == nil {
+		// Fix: fmt.Errorf(fmt.Sprintf(...)) double-formats; format directly
+		// (applies to the three error returns below as well).
+		return fmt.Errorf("%s not registered", path)
+	}
+
+	ipc, err := vm.getInstance(ctxt, ipctemplate, ccid, args, env)
+	if err != nil {
+		return fmt.Errorf("could not create instance for %s", ccid.ChaincodeSpec.ChaincodeID.Name)
+	}
+
+	if ipc.running {
+		return fmt.Errorf("chaincode running %s", path)
+	}
+
+	//TODO VALIDITY CHECKS ?
+
+	// The chaincode-support handler is carried on the context by the caller.
+	ccSupport, ok := ctxt.Value(ccintf.GetCCHandlerKey()).(ccintf.CCSupport)
+	if !ok || ccSupport == nil {
+		return fmt.Errorf("in-process communication generator not supplied")
+	}
+
+	ipc.running = true
+
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				inprocLogger.Criticalf("caught panic from chaincode %s", ccid.ChaincodeSpec.ChaincodeID.Name)
+			}
+		}()
+		ipc.launchInProc(ctxt, ccid.ChaincodeSpec.ChaincodeID.Name, args, env, ccSupport)
+	}()
+
+	return nil
+}
+
+//Stop stops a system codechain
+func (vm *InprocVM) Stop(ctxt context.Context, ccid ccintf.CCID, timeout uint, dontkill bool, dontremove bool) error {
+	path := ccid.ChaincodeSpec.ChaincodeID.Path
+
+	ipctemplate := typeRegistry[path]
+	if ipctemplate == nil {
+		return fmt.Errorf("%s not registered", path)
+	}
+
+	ipc := instRegistry[ccid.ChaincodeSpec.ChaincodeID.Name]
+
+	if ipc == nil {
+		return fmt.Errorf("%s not found", ccid.ChaincodeSpec.ChaincodeID.Name)
+	}
+
+	if !ipc.running {
+		return fmt.Errorf("%s not running", ccid.ChaincodeSpec.ChaincodeID.Name)
+	}
+
+	// Signal launchInProc to close both message channels.
+	// NOTE(review): this unbuffered send blocks forever if launchInProc has
+	// already returned — confirm callers cannot hit that window.
+	ipc.stopChan <- struct{}{}
+
+	delete(instRegistry, ccid.ChaincodeSpec.ChaincodeID.Name)
+	//TODO stop
+	return nil
+}
+
+//Destroy destroys an image
+func (vm *InprocVM) Destroy(ctxt context.Context, ccid ccintf.CCID, force bool, noprune bool) error {
+	//not implemented: an in-process chaincode has no image to remove.
+	return nil
+}
+
+//GetVMName ignores the peer and network name as it just needs to be unique in process
+func (vm *InprocVM) GetVMName(ccid ccintf.CCID) (string, error) {
+	// The chaincode name alone identifies the in-process "vm"; error is always nil.
+	return ccid.ChaincodeSpec.ChaincodeID.Name, nil
+}
diff --git a/core/container/inproccontroller/inprocstream.go b/core/container/inproccontroller/inprocstream.go
new file mode 100644
index 00000000000..3604b25fb72
--- /dev/null
+++ b/core/container/inproccontroller/inprocstream.go
@@ -0,0 +1,41 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package inproccontroller
+
+import (
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// PeerChaincodeStream interface for stream between Peer and chaincode instance.
+type inProcStream struct {
+	recv <-chan *pb.ChaincodeMessage // messages arriving at this side
+	send chan<- *pb.ChaincodeMessage // messages leaving this side
+}
+
+// newInProcStream wraps a receive/send channel pair as an inProcStream.
+func newInProcStream(recv <-chan *pb.ChaincodeMessage, send chan<- *pb.ChaincodeMessage) *inProcStream {
+	return &inProcStream{recv, send}
+}
+
+// Send forwards msg on the send channel. It blocks until the other side
+// receives; the returned error is always nil.
+func (s *inProcStream) Send(msg *pb.ChaincodeMessage) error {
+	s.send <- msg
+	return nil
+}
+
+// Recv blocks for the next message on the recv channel; the returned error
+// is always nil (a closed channel yields a nil message).
+func (s *inProcStream) Recv() (*pb.ChaincodeMessage, error) {
+	msg := <-s.recv
+	return msg, nil
+}
diff --git a/core/container/util/dockerutil.go b/core/container/util/dockerutil.go
new file mode 100644
index 00000000000..fb650c81bb9
--- /dev/null
+++ b/core/container/util/dockerutil.go
@@ -0,0 +1,37 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "github.com/fsouza/go-dockerclient"
+ "github.com/spf13/viper"
+)
+
+//NewDockerClient creates a docker client
+func NewDockerClient() (*docker.Client, error) {
+	endpoint := viper.GetString("vm.endpoint")
+	// Use a TLS client when configured to do so, otherwise a plain client.
+	if viper.GetBool("vm.docker.tls.enabled") {
+		cert := viper.GetString("vm.docker.tls.cert.file")
+		key := viper.GetString("vm.docker.tls.key.file")
+		ca := viper.GetString("vm.docker.tls.ca.file")
+		return docker.NewTLSClient(endpoint, cert, key, ca)
+	}
+	return docker.NewClient(endpoint)
+}
diff --git a/core/container/util/writer.go b/core/container/util/writer.go
new file mode 100644
index 00000000000..e9a644c4ff6
--- /dev/null
+++ b/core/container/util/writer.go
@@ -0,0 +1,157 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "archive/tar"
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+// vmLogger is the package logger for the container packaging utilities.
+var vmLogger = logging.MustGetLogger("container")
+
+// fileTypes whitelists the source-file extensions included when tarring up
+// GOPATH sources in WriteGopathSrc.
+var fileTypes = map[string]bool{
+	".c":    true,
+	".h":    true,
+	".go":   true,
+	".yaml": true,
+	".json": true,
+}
+
+//WriteGopathSrc tars up files under gopath src
+func WriteGopathSrc(tw *tar.Writer, excludeDir string) error {
+	gopath := os.Getenv("GOPATH")
+	// Only take the first element of GOPATH.
+	// Fix: guard against an unset/empty GOPATH, which previously caused an
+	// index-out-of-range panic on SplitList(...)[0].
+	gopathList := filepath.SplitList(gopath)
+	if len(gopathList) == 0 || gopathList[0] == "" {
+		return fmt.Errorf("GOPATH is not set")
+	}
+	gopath = gopathList[0]
+
+	rootDirectory := filepath.Join(gopath, "src")
+	vmLogger.Infof("rootDirectory = %s", rootDirectory)
+
+	//append "/" if necessary
+	if excludeDir != "" && !strings.HasSuffix(excludeDir, "/") {
+		excludeDir = excludeDir + "/"
+	}
+
+	rootDirLen := len(rootDirectory)
+	walkFn := func(path string, info os.FileInfo, err error) error {
+
+		// If path includes .git, ignore
+		if strings.Contains(path, ".git") {
+			return nil
+		}
+
+		if info.Mode().IsDir() {
+			return nil
+		}
+
+		//exclude any files with excludeDir prefix. They should already be in the tar
+		if excludeDir != "" && strings.Index(path, excludeDir) == rootDirLen+1 {
+			//1 for "/"
+			return nil
+		}
+		// Because of scoping we can reference the external rootDirectory variable
+		if len(path[rootDirLen:]) == 0 {
+			return nil
+		}
+
+		// we only want 'fileTypes' source files at this point
+		if _, ok := fileTypes[filepath.Ext(path)]; !ok {
+			return nil
+		}
+
+		// Re-root the entry under "src" inside the archive.
+		newPath := fmt.Sprintf("src%s", path[rootDirLen:])
+
+		if err = WriteFileToPackage(path, newPath, tw); err != nil {
+			return fmt.Errorf("Error writing file to package: %s", err)
+		}
+
+		return nil
+	}
+
+	if err := filepath.Walk(rootDirectory, walkFn); err != nil {
+		vmLogger.Infof("Error walking rootDirectory: %s", err)
+		return err
+	}
+
+	// Add the certificates to tar
+	if viper.GetBool("peer.tls.enabled") {
+		err := WriteFileToPackage(viper.GetString("peer.tls.cert.file"), "src/certs/cert.pem", tw)
+		if err != nil {
+			return fmt.Errorf("Error writing cert file to package: %s", err)
+		}
+	}
+
+	// Write the tar file out
+	return tw.Close()
+}
+
+//WriteFileToPackage writes a file to the tarball
+func WriteFileToPackage(localpath string, packagepath string, tw *tar.Writer) error {
+	file, err := os.Open(localpath)
+	if err != nil {
+		return fmt.Errorf("%s: %s", localpath, err)
+	}
+	defer file.Close()
+
+	// Buffer the reads and delegate the actual tar entry writing.
+	return WriteStreamToPackage(bufio.NewReader(file), localpath, packagepath, tw)
+}
+
+//WriteStreamToPackage writes bytes (from a file reader) to the tarball
+func WriteStreamToPackage(is io.Reader, localpath string, packagepath string, tw *tar.Writer) error {
+ info, err := os.Stat(localpath)
+ if err != nil {
+ return fmt.Errorf("%s: %s", localpath, err)
+ }
+ header, err := tar.FileInfoHeader(info, localpath)
+ if err != nil {
+ return fmt.Errorf("Error getting FileInfoHeader: %s", err)
+ }
+
+ //Let's take the variance out of the tar, make headers identical by using zero time
+ oldname := header.Name
+ var zeroTime time.Time
+ header.AccessTime = zeroTime
+ header.ModTime = zeroTime
+ header.ChangeTime = zeroTime
+ header.Name = packagepath
+
+ if err = tw.WriteHeader(header); err != nil {
+ return fmt.Errorf("Error write header for (path: %s, oldname:%s,newname:%s,sz:%d) : %s", localpath, oldname, packagepath, header.Size, err)
+ }
+ if _, err := io.Copy(tw, is); err != nil {
+ return fmt.Errorf("Error copy (path: %s, oldname:%s,newname:%s,sz:%d) : %s", localpath, oldname, packagepath, header.Size, err)
+ }
+
+ return nil
+}
diff --git a/core/container/vm.go b/core/container/vm.go
new file mode 100644
index 00000000000..9075d1c1d2e
--- /dev/null
+++ b/core/container/vm.go
@@ -0,0 +1,129 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package container
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+
+ "golang.org/x/net/context"
+
+ "github.com/fsouza/go-dockerclient"
+ "github.com/hyperledger/fabric/core/chaincode/platforms"
+ cutil "github.com/hyperledger/fabric/core/container/util"
+ pb "github.com/hyperledger/fabric/protos"
+ "github.com/op/go-logging"
+)
+
+// VM implementation of VM management functionality.
+type VM struct {
+	Client *docker.Client // docker client used for all image/container calls
+}
+
+// NewVM creates a new VM instance.
+func NewVM() (*VM, error) {
+	client, err := cutil.NewDockerClient()
+	if err != nil {
+		return nil, err
+	}
+	// Fix: do not name the local variable "VM" — it shadows the type name.
+	return &VM{Client: client}, nil
+}
+
+// vmLogger is the package logger for the container package.
+var vmLogger = logging.MustGetLogger("container")
+
+// ListImages list the images available
+func (vm *VM) ListImages(context context.Context) error {
+	images, err := vm.Client.ListImages(docker.ListImagesOptions{All: false})
+	if err != nil {
+		return err
+	}
+	// Print a few interesting fields of each image to stdout.
+	for _, image := range images {
+		fmt.Println("ID: ", image.ID)
+		fmt.Println("RepoTags: ", image.RepoTags)
+		fmt.Println("Created: ", image.Created)
+		fmt.Println("Size: ", image.Size)
+		fmt.Println("VirtualSize: ", image.VirtualSize)
+		fmt.Println("ParentId: ", image.ParentID)
+	}
+	return nil
+}
+
+// BuildChaincodeContainer builds the container for the supplied chaincode specification
+func (vm *VM) BuildChaincodeContainer(spec *pb.ChaincodeSpec) ([]byte, error) {
+	// Package the chaincode into a gzipped tar, then build an image from it.
+	pkgBytes, err := GetChaincodePackageBytes(spec)
+	if err != nil {
+		return nil, fmt.Errorf("Error getting chaincode package bytes: %s", err)
+	}
+	if err := vm.buildChaincodeContainerUsingDockerfilePackageBytes(spec, pkgBytes); err != nil {
+		return nil, fmt.Errorf("Error building Chaincode container: %s", err)
+	}
+	return pkgBytes, nil
+}
+
+// GetChaincodePackageBytes creates bytes for docker container generation using the supplied chaincode specification
+func GetChaincodePackageBytes(spec *pb.ChaincodeSpec) ([]byte, error) {
+	if spec == nil || spec.ChaincodeID == nil {
+		return nil, fmt.Errorf("invalid chaincode spec")
+	}
+
+	// tar inside gzip: the platform writer fills the tar stream.
+	inputbuf := bytes.NewBuffer(nil)
+	gw := gzip.NewWriter(inputbuf)
+	tw := tar.NewWriter(gw)
+
+	platform, err := platforms.Find(spec.Type)
+	if err != nil {
+		return nil, err
+	}
+
+	err = platform.WritePackage(spec, tw)
+	if err != nil {
+		return nil, err
+	}
+
+	// Fix: the Close errors were previously discarded and a stale err was
+	// re-checked instead; a failed Close means a truncated archive.
+	if err = tw.Close(); err != nil {
+		return nil, err
+	}
+	if err = gw.Close(); err != nil {
+		return nil, err
+	}
+
+	return inputbuf.Bytes(), nil
+}
+
+// Builds the Chaincode image using the supplied Dockerfile package contents
+func (vm *VM) buildChaincodeContainerUsingDockerfilePackageBytes(spec *pb.ChaincodeSpec, code []byte) error {
+	output := bytes.NewBuffer(nil)
+	buildOpts := docker.BuildImageOptions{
+		Name:         spec.ChaincodeID.Name,
+		InputStream:  bytes.NewReader(code),
+		OutputStream: output,
+	}
+	if err := vm.Client.BuildImage(buildOpts); err != nil {
+		// Surface the captured docker build output to aid debugging.
+		vmLogger.Errorf("Failed Chaincode docker build:\n%s\n", output.String())
+		return fmt.Errorf("Error building Chaincode container: %s", err)
+	}
+	return nil
+}
diff --git a/core/container/vm_test.go b/core/container/vm_test.go
new file mode 100644
index 00000000000..7f75248d202
--- /dev/null
+++ b/core/container/vm_test.go
@@ -0,0 +1,113 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package container
+
+import (
+ "archive/tar"
+ "bytes"
+ "flag"
+ "io/ioutil"
+ "os"
+ "testing"
+
+ cutil "github.com/hyperledger/fabric/core/container/util"
+ pb "github.com/hyperledger/fabric/protos"
+ "golang.org/x/net/context"
+)
+
+// TestMain registers the -run-controller-tests flag and loads the test
+// configuration before running the suite.
+func TestMain(m *testing.M) {
+ // runTests is declared elsewhere in this package — TODO confirm where it is consumed.
+ flag.BoolVar(&runTests, "run-controller-tests", false, "run tests")
+ flag.Parse()
+ SetupTestConfig()
+ os.Exit(m.Run())
+}
+
+// TestVM_ListImages exercises VM.ListImages against a local docker daemon.
+// Skipped unconditionally; kept for manual runs.
+func TestVM_ListImages(t *testing.T) {
+ t.Skip("No need to invoke list images.")
+ vm, err := NewVM()
+ if err != nil {
+ t.Fail()
+ t.Logf("Error getting VM: %s", err)
+ }
+ err = vm.ListImages(context.TODO())
+ if err != nil {
+ t.Fail()
+ t.Logf("Error listing images: %s", err)
+ }
+}
+
+// TestVM_BuildImage_WritingGopathSource dumps the GOPATH source tar for manual
+// inspection. Skipped unconditionally; kept for manual runs.
+func TestVM_BuildImage_WritingGopathSource(t *testing.T) {
+ t.Skip("This can be re-enabled if testing GOPATH writing to tar image.")
+ inputbuf := bytes.NewBuffer(nil)
+ tw := tar.NewWriter(inputbuf)
+
+ err := cutil.WriteGopathSrc(tw, "")
+ if err != nil {
+ t.Fail()
+ t.Logf("Error writing gopath src: %s", err)
+ }
+ // The dump's write error was previously discarded, which could silently
+ // hide a failed archive write.
+ if err := ioutil.WriteFile("/tmp/chaincode_deployment.tar", inputbuf.Bytes(), 0644); err != nil {
+ t.Logf("Error writing tar dump: %s", err)
+ }
+}
+
+// TestVM_BuildImage_ChaincodeLocal builds a docker image for an example
+// chaincode that lives in the local GOPATH. Requires a reachable docker daemon.
+func TestVM_BuildImage_ChaincodeLocal(t *testing.T) {
+ vm, err := NewVM()
+ if err != nil {
+ t.Fail()
+ t.Logf("Error getting VM: %s", err)
+ return
+ }
+ // Build the spec
+ chaincodePath := "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example01"
+ spec := &pb.ChaincodeSpec{Type: pb.ChaincodeSpec_GOLANG, ChaincodeID: &pb.ChaincodeID{Path: chaincodePath}, CtorMsg: &pb.ChaincodeInput{Function: "f"}}
+ if _, err := vm.BuildChaincodeContainer(spec); err != nil {
+ t.Fail()
+ t.Log(err)
+ }
+}
+
+// TestVM_BuildImage_ChaincodeRemote builds a docker image for a chaincode
+// fetched from a remote URL. Skipped: needs user credentials.
+func TestVM_BuildImage_ChaincodeRemote(t *testing.T) {
+ t.Skip("Works but needs user credentials. Not suitable for automated unit tests as is")
+ vm, err := NewVM()
+ if err != nil {
+ t.Fail()
+ t.Logf("Error getting VM: %s", err)
+ return
+ }
+ // Build the spec
+ chaincodePath := "https://github.com/prjayach/chaincode_examples/chaincode_example02"
+ spec := &pb.ChaincodeSpec{Type: pb.ChaincodeSpec_GOLANG, ChaincodeID: &pb.ChaincodeID{Path: chaincodePath}, CtorMsg: &pb.ChaincodeInput{Function: "f"}}
+ if _, err := vm.BuildChaincodeContainer(spec); err != nil {
+ t.Fail()
+ t.Log(err)
+ }
+}
+
+// TestVM_Chaincode_Compile is a placeholder for chaincode compilation testing.
+// The previously commented-out draft body was dead code and has been removed;
+// restore an implementation here when the feature lands.
+func TestVM_Chaincode_Compile(t *testing.T) {
+ t.Skip("NOT IMPLEMENTED")
+}
diff --git a/core/crypto/attributes/attributes.go b/core/crypto/attributes/attributes.go
new file mode 100644
index 00000000000..557453eff9f
--- /dev/null
+++ b/core/crypto/attributes/attributes.go
@@ -0,0 +1,273 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package attributes
+
+import (
+ "bytes"
+ "crypto/x509"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ pb "github.com/hyperledger/fabric/core/crypto/attributes/proto"
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+
+ "github.com/golang/protobuf/proto"
+)
+
+var (
+ // TCertEncAttributesBase is the base ASN1 object identifier for attributes.
+ // When generating an extension to include the attribute an index will be
+ // appended to this Object Identifier.
+ TCertEncAttributesBase = asn1.ObjectIdentifier{1, 2, 3, 4, 5, 6}
+
+ // TCertAttributesHeaders is the ASN1 object identifier of attributes header.
+ TCertAttributesHeaders = asn1.ObjectIdentifier{1, 2, 3, 4, 5, 6, 9}
+
+ // padding (16 bytes of 0xFF) is appended before encryption and verified
+ // after decryption to detect use of a wrong key.
+ padding = []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}
+
+ //headerPrefix is the prefix used in the header extension of the certificate.
+ headerPrefix = "00HEAD"
+
+ //HeaderAttributeName is the name used to derive the key used to encrypt/decrypt the header.
+ HeaderAttributeName = "attributeHeader"
+)
+
+//ParseAttributesHeader parses a string and returns a map with the attributes.
+//The expected format is headerPrefix followed by "#"-separated "name->position"
+//pairs, e.g. "00HEADcompany->1#position->2#".
+func ParseAttributesHeader(header string) (map[string]int, error) {
+ if !strings.HasPrefix(header, headerPrefix) {
+ return nil, errors.New("Invalid header")
+ }
+ headerBody := strings.Replace(header, headerPrefix, "", 1)
+ tokens := strings.Split(headerBody, "#")
+ result := make(map[string]int)
+
+ for _, token := range tokens {
+ pair := strings.Split(token, "->")
+
+ // Tokens that are not exactly "name->position" (including the empty
+ // token after the trailing "#") are silently skipped.
+ if len(pair) == 2 {
+ key := pair[0]
+ valueStr := pair[1]
+ value, err := strconv.Atoi(valueStr)
+ if err != nil {
+ return nil, err
+ }
+ result[key] = value
+ }
+ }
+
+ return result, nil
+}
+
+//ReadAttributeHeader read the header of the attributes.
+//It first tries to parse the header extension as plaintext; if that fails it
+//assumes the header is encrypted, decrypts it with headerKey and parses again.
+//The returned bool reports whether the header (and hence the attribute values)
+//was stored encrypted.
+func ReadAttributeHeader(tcert *x509.Certificate, headerKey []byte) (map[string]int, bool, error) {
+ var err error
+ var headerRaw []byte
+ encrypted := false
+ if headerRaw, err = primitives.GetCriticalExtension(tcert, TCertAttributesHeaders); err != nil {
+ return nil, encrypted, err
+ }
+ headerStr := string(headerRaw)
+ var header map[string]int
+ header, err = ParseAttributesHeader(headerStr)
+ if err != nil {
+ // Plaintext parse failed: treat the extension as encrypted.
+ if headerKey == nil {
+ return nil, false, errors.New("Is not possible read an attribute encrypted without the headerKey")
+ }
+ headerRaw, err = DecryptAttributeValue(headerKey, headerRaw)
+
+ if err != nil {
+ return nil, encrypted, errors.New("error decrypting header value '" + err.Error() + "''")
+ }
+ headerStr = string(headerRaw)
+ header, err = ParseAttributesHeader(headerStr)
+ if err != nil {
+ return nil, encrypted, err
+ }
+ encrypted = true
+ }
+ return header, encrypted, nil
+}
+
+//ReadTCertAttributeByPosition read the attribute stored in the position "position" of the tcert.
+//Positions are 1-based; the attribute for position N lives in the critical
+//extension whose OID is TCertEncAttributesBase with last arc 9+N.
+func ReadTCertAttributeByPosition(tcert *x509.Certificate, position int) ([]byte, error) {
+ if position <= 0 {
+ return nil, fmt.Errorf("Invalid attribute position. Received [%v]", position)
+ }
+
+ oid := asn1.ObjectIdentifier{1, 2, 3, 4, 5, 6, 9 + position}
+ value, err := primitives.GetCriticalExtension(tcert, oid)
+ if err != nil {
+ return nil, err
+ }
+ return value, nil
+}
+
+//ReadTCertAttribute read the attribute with name "attributeName" and returns the value and a boolean indicating if the returned value is encrypted or not.
+//headerKey is only required when the header itself is stored encrypted.
+func ReadTCertAttribute(tcert *x509.Certificate, attributeName string, headerKey []byte) ([]byte, bool, error) {
+ header, encrypted, err := ReadAttributeHeader(tcert, headerKey)
+ if err != nil {
+ return nil, false, err
+ }
+ // The map zero value 0 doubles as "absent": valid positions start at 1.
+ position := header[attributeName]
+ if position == 0 {
+ return nil, encrypted, errors.New("Failed attribute '" + attributeName + "' doesn't exists in the TCert.")
+ }
+ value, err := ReadTCertAttributeByPosition(tcert, position)
+ if err != nil {
+ return nil, encrypted, err
+ }
+ return value, encrypted, nil
+}
+
+//EncryptAttributeValue encrypts "attributeValue" using "attributeKey".
+//The fixed padding block is appended to the plaintext before encryption so
+//DecryptAttributeValue can verify the key on the way back.
+func EncryptAttributeValue(attributeKey []byte, attributeValue []byte) ([]byte, error) {
+ // Copy into a fresh slice before appending the padding: appending
+ // directly to attributeValue could write into the caller's backing
+ // array when the slice has spare capacity.
+ value := make([]byte, 0, len(attributeValue)+len(padding))
+ value = append(value, attributeValue...)
+ value = append(value, padding...)
+ return primitives.CBCPKCS7Encrypt(attributeKey, value)
+}
+
+//getAttributeKey returns the attributeKey derived from the preK0 to the attributeName.
+//Derivation is a 32-byte truncated HMAC of the attribute name keyed with preK0.
+func getAttributeKey(preK0 []byte, attributeName string) []byte {
+ return primitives.HMACTruncated(preK0, []byte(attributeName), 32)
+}
+
+//EncryptAttributeValuePK0 encrypts "attributeValue" using a key derived from preK0.
+//Convenience wrapper over getAttributeKey + EncryptAttributeValue.
+func EncryptAttributeValuePK0(preK0 []byte, attributeName string, attributeValue []byte) ([]byte, error) {
+ attributeKey := getAttributeKey(preK0, attributeName)
+ return EncryptAttributeValue(attributeKey, attributeValue)
+}
+
+//DecryptAttributeValue decrypts "encryptedValue" using "attributeKey" and return the decrypted value.
+//It verifies and strips the trailing padding block appended by
+//EncryptAttributeValue; a mismatch signals a wrong key or corrupted data.
+func DecryptAttributeValue(attributeKey []byte, encryptedValue []byte) ([]byte, error) {
+ value, err := primitives.CBCPKCS7Decrypt(attributeKey, encryptedValue)
+ if err != nil {
+ return nil, err
+ }
+ if len(value) < len(padding) {
+ return nil, errors.New("Error invalid value. Decryption verification failed.")
+ }
+ lenWithoutPadding := len(value) - len(padding)
+ // The plaintext must end with the exact padding block.
+ if !bytes.Equal(padding, value[lenWithoutPadding:]) {
+ return nil, errors.New("Error generating decryption key for value. Decryption verification failed.")
+ }
+ return value[:lenWithoutPadding], nil
+}
+
+//getKAndValueForAttribute derives K for the attribute "attributeName", checks the value padding and returns both key and decrypted value
+//When the certificate stores attributes encrypted, the value is decrypted
+//with the derived key before being returned.
+func getKAndValueForAttribute(attributeName string, preK0 []byte, cert *x509.Certificate) ([]byte, []byte, error) {
+ // The header has its own derived key, distinct from the attribute's key.
+ headerKey := getAttributeKey(preK0, HeaderAttributeName)
+ value, encrypted, err := ReadTCertAttribute(cert, attributeName, headerKey)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ attributeKey := getAttributeKey(preK0, attributeName)
+ if encrypted {
+ value, err = DecryptAttributeValue(attributeKey, value)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ return attributeKey, value, nil
+}
+
+//GetKForAttribute derives the K for the attribute "attributeName" and returns the key
+//Errors if the attribute is not present in the certificate.
+func GetKForAttribute(attributeName string, preK0 []byte, cert *x509.Certificate) ([]byte, error) {
+ key, _, err := getKAndValueForAttribute(attributeName, preK0, cert)
+ return key, err
+}
+
+//GetValueForAttribute derives the K for the attribute "attributeName" and returns the value
+//(decrypted when the certificate stores attributes encrypted).
+func GetValueForAttribute(attributeName string, preK0 []byte, cert *x509.Certificate) ([]byte, error) {
+ _, value, err := getKAndValueForAttribute(attributeName, preK0, cert)
+ return value, err
+}
+
+// createAttributesHeaderEntry builds the metadata entry for the header
+// pseudo-attribute, carrying the key needed to decrypt the header extension.
+func createAttributesHeaderEntry(preK0 []byte) *pb.AttributesMetadataEntry {
+ attKey := getAttributeKey(preK0, HeaderAttributeName)
+ return &pb.AttributesMetadataEntry{AttributeName: HeaderAttributeName, AttributeKey: attKey}
+}
+
+// createAttributesMetadataEntry builds a metadata entry pairing an attribute
+// name with its key derived from preK0.
+func createAttributesMetadataEntry(attributeName string, preK0 []byte) *pb.AttributesMetadataEntry {
+ attKey := getAttributeKey(preK0, attributeName)
+ return &pb.AttributesMetadataEntry{AttributeName: attributeName, AttributeKey: attKey}
+}
+
+//CreateAttributesMetadataObjectFromCert creates an AttributesMetadata object from certificate "cert", metadata and the attributes keys.
+//One entry is emitted per non-empty attribute name, plus a final entry for
+//the header pseudo-attribute.
+//NOTE(review): the cert parameter is not referenced in this body — confirm
+//whether it is kept only for interface symmetry with callers.
+func CreateAttributesMetadataObjectFromCert(cert *x509.Certificate, metadata []byte, preK0 []byte, attributeKeys []string) *pb.AttributesMetadata {
+ var entries []*pb.AttributesMetadataEntry
+ for _, key := range attributeKeys {
+ // Skip blank attribute names rather than emitting empty entries.
+ if len(key) == 0 {
+ continue
+ }
+
+ entry := createAttributesMetadataEntry(key, preK0)
+ entries = append(entries, entry)
+ }
+ headerEntry := createAttributesHeaderEntry(preK0)
+ entries = append(entries, headerEntry)
+
+ return &pb.AttributesMetadata{Metadata: metadata, Entries: entries}
+}
+
+//CreateAttributesMetadataFromCert creates the AttributesMetadata from the original metadata and certificate "cert".
+//Returns the protobuf-marshaled form of the metadata object.
+func CreateAttributesMetadataFromCert(cert *x509.Certificate, metadata []byte, preK0 []byte, attributeKeys []string) ([]byte, error) {
+ attributesMetadata := CreateAttributesMetadataObjectFromCert(cert, metadata, preK0, attributeKeys)
+
+ return proto.Marshal(attributesMetadata)
+}
+
+//CreateAttributesMetadata create the AttributesMetadata from the original metadata
+//"raw" is the DER encoding of the certificate the attributes belong to.
+func CreateAttributesMetadata(raw []byte, metadata []byte, preK0 []byte, attributeKeys []string) ([]byte, error) {
+ cert, err := primitives.DERToX509Certificate(raw)
+ if err != nil {
+ return nil, err
+ }
+
+ return CreateAttributesMetadataFromCert(cert, metadata, preK0, attributeKeys)
+}
+
+//GetAttributesMetadata object from the original metadata "metadata".
+//Unmarshals the protobuf form produced by CreateAttributesMetadata*.
+func GetAttributesMetadata(metadata []byte) (*pb.AttributesMetadata, error) {
+ attributesMetadata := &pb.AttributesMetadata{}
+ err := proto.Unmarshal(metadata, attributesMetadata)
+ return attributesMetadata, err
+}
+
+//BuildAttributesHeader builds a header attribute from a map of attribute names and positions.
+//The output is headerPrefix followed by "name->position#" pairs; because Go
+//map iteration order is unspecified, the pair order is nondeterministic (the
+//format is parsed back into a map, so order does not matter to consumers).
+//Duplicate positions are rejected with an error.
+func BuildAttributesHeader(attributesHeader map[string]int) ([]byte, error) {
+ var header []byte
+ var headerString string
+ var positions = make(map[int]bool)
+
+ for k, v := range attributesHeader {
+ if positions[v] {
+ return nil, errors.New("Duplicated position found in attributes header")
+ }
+ positions[v] = true
+
+ vStr := strconv.Itoa(v)
+ headerString = headerString + k + "->" + vStr + "#"
+ }
+ header = []byte(headerPrefix + headerString)
+ return header, nil
+}
diff --git a/core/crypto/attributes/attributes_test.go b/core/crypto/attributes/attributes_test.go
new file mode 100644
index 00000000000..c80b7dd9eac
--- /dev/null
+++ b/core/crypto/attributes/attributes_test.go
@@ -0,0 +1,545 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package attributes
+
+import (
+ "bytes"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ pb "github.com/hyperledger/fabric/core/crypto/attributes/proto"
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+)
+
+// TestMain initializes the crypto primitives (SHA3-256) required by every
+// test in this package before running the suite.
+func TestMain(m *testing.M) {
+ if err := primitives.InitSecurityLevel("SHA3", 256); err != nil {
+ fmt.Printf("Failed setting security level: %v", err)
+ }
+
+ ret := m.Run()
+ os.Exit(ret)
+}
+
+// TestEncryptDecryptAttributeValuePK0 round-trips a value through
+// EncryptAttributeValuePK0 / DecryptAttributeValue with a fixed preK0.
+func TestEncryptDecryptAttributeValuePK0(t *testing.T) {
+ expected := "ACompany"
+
+ preK0 := []byte{
+ 91, 206, 163, 104, 247, 74, 149, 209, 91, 137, 215, 236,
+ 84, 135, 9, 70, 160, 138, 89, 163, 240, 223, 83, 164, 58,
+ 208, 199, 23, 221, 123, 53, 220, 15, 41, 28, 111, 166,
+ 28, 29, 187, 97, 229, 117, 117, 49, 192, 134, 31, 151}
+
+ encryptedAttribute, err := EncryptAttributeValuePK0(preK0, "company", []byte(expected))
+ if err != nil {
+ t.Error(err)
+ }
+
+ attributeKey := getAttributeKey(preK0, "company")
+
+ attribute, err := DecryptAttributeValue(attributeKey, encryptedAttribute)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(attribute) != expected {
+ t.Errorf("Failed decrypting attribute. Expected: %v, Actual: %v", expected, attribute)
+ }
+}
+
+// TestGetKAndValueForAttribute reads the "position" attribute from the TCert
+// fixture and checks the decrypted value.
+func TestGetKAndValueForAttribute(t *testing.T) {
+ expected := "Software Engineer"
+
+ tcert, prek0, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, attribute, err := getKAndValueForAttribute("position", prek0, tcert)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(attribute) != expected {
+ t.Errorf("Failed retrieving attribute value from TCert. Expected: %v, Actual: %v", expected, string(attribute))
+ }
+}
+
+// TestGetKAndValueForAttribute_MissingAttribute asserts that reading an
+// attribute absent from the TCert fails.
+func TestGetKAndValueForAttribute_MissingAttribute(t *testing.T) {
+ tcert, prek0, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, _, err = getKAndValueForAttribute("business_unit", prek0, tcert)
+ if err == nil {
+ t.Errorf("Trying to read an attribute that is not part of the TCert should produce an error")
+ }
+}
+
+// TestGetValueForAttribute checks the public value accessor against the fixture.
+func TestGetValueForAttribute(t *testing.T) {
+ expected := "Software Engineer"
+
+ tcert, prek0, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+
+ value, err := GetValueForAttribute("position", prek0, tcert)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(value) != expected {
+ t.Errorf("Failed retrieving attribute value from TCert. Expected: %v, Actual: %v", expected, string(value))
+ }
+}
+
+// TestGetValueForAttribute_MissingAttribute asserts the public accessor also
+// fails for absent attributes.
+func TestGetValueForAttribute_MissingAttribute(t *testing.T) {
+ tcert, prek0, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, err = GetValueForAttribute("business_unit", prek0, tcert)
+ if err == nil {
+ t.Errorf("Trying to read an attribute that is not part of the TCert should produce an error")
+ }
+}
+
+// TestGetKForAttribute verifies the key returned by GetKForAttribute can
+// decrypt a value freshly encrypted with the same preK0-derived key.
+func TestGetKForAttribute(t *testing.T) {
+ expected := "Software Engineer"
+
+ tcert, prek0, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+
+ key, err := GetKForAttribute("position", prek0, tcert)
+ if err != nil {
+ t.Error(err)
+ }
+
+ encryptedValue, err := EncryptAttributeValuePK0(prek0, "position", []byte(expected))
+ if err != nil {
+ t.Error(err)
+ }
+
+ decryptedValue, err := DecryptAttributeValue(key, encryptedValue)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(decryptedValue) != expected {
+ t.Errorf("Failed decrypting attribute used calculated key. Expected: %v, Actual: %v", expected, string(decryptedValue))
+ }
+}
+
+// TestGetKForAttribute_MissingAttribute asserts key derivation fails for an
+// attribute absent from the TCert.
+func TestGetKForAttribute_MissingAttribute(t *testing.T) {
+ tcert, prek0, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, err = GetKForAttribute("business_unit", prek0, tcert)
+ if err == nil {
+ t.Errorf("Trying to get a key for an attribute that is not part of the TCert should produce an error")
+ }
+}
+
+// TestParseEmptyAttributesHeader: an empty string lacks the prefix and must fail.
+func TestParseEmptyAttributesHeader(t *testing.T) {
+ _, err := ParseAttributesHeader("")
+ if err == nil {
+ t.Error("Empty header should produce a parsing error")
+ }
+}
+
+// TestParseAttributesHeader_NotNumberPosition: non-numeric positions must fail.
+func TestParseAttributesHeader_NotNumberPosition(t *testing.T) {
+ _, err := ParseAttributesHeader(headerPrefix + "position->a#")
+ if err == nil {
+ t.Error("Not number position in the header should produce a parsing error")
+ }
+}
+
+// TestBuildAndParseAttributesHeader round-trips a header through
+// BuildAttributesHeader and ParseAttributesHeader.
+func TestBuildAndParseAttributesHeader(t *testing.T) {
+ attributes := make(map[string]int)
+ attributes["company"] = 1
+ attributes["position"] = 2
+
+ headerRaw, err := BuildAttributesHeader(attributes)
+ if err != nil {
+ t.Error(err)
+ }
+ header := string(headerRaw[:])
+
+ components, err := ParseAttributesHeader(header)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(components) != 2 {
+ t.Errorf("Error parsing header. Expecting two entries in header, found %v instead", len(components))
+ }
+
+ if components["company"] != 1 {
+ t.Errorf("Error parsing header. Expected %v with value %v, found %v instead", "company", 1, components["company"])
+ }
+
+ if components["position"] != 2 {
+ t.Errorf("Error parsing header. Expected %v with value %v, found %v instead", "position", 2, components["position"])
+ }
+}
+
+// TestReadAttributeHeader decrypts and parses the fixture's encrypted header.
+func TestReadAttributeHeader(t *testing.T) {
+ tcert, prek0, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+
+ headerKey := getAttributeKey(prek0, HeaderAttributeName)
+
+ header, encrypted, err := ReadAttributeHeader(tcert, headerKey)
+
+ if err != nil {
+ t.Error(err)
+ }
+
+ if !encrypted {
+ t.Errorf("Error parsing header. Expecting encrypted header.")
+ }
+
+ if len(header) != 1 {
+ t.Errorf("Error parsing header. Expecting %v entries in header, found %v instead", 1, len(header))
+ }
+
+ if header["position"] != 1 {
+ t.Errorf("Error parsing header. Expected %v with value %v, found %v instead", "position", 1, header["position"])
+ }
+}
+
+// TestReadAttributeHeader_WithoutHeaderKey: a nil headerKey must be rejected
+// for an encrypted header.
+func TestReadAttributeHeader_WithoutHeaderKey(t *testing.T) {
+ tcert, _, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, _, err = ReadAttributeHeader(tcert, nil)
+
+ // NOTE(review): err is nil on this failure path, so t.Error(err) logs
+ // nothing useful — a literal message would be clearer.
+ if err == nil {
+ t.Error(err)
+ }
+}
+
+// TestReadAttributeHeader_InvalidHeaderKey: a key derived from the wrong name
+// must fail header decryption.
+func TestReadAttributeHeader_InvalidHeaderKey(t *testing.T) {
+ tcert, prek0, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+
+ headerKey := getAttributeKey(prek0, HeaderAttributeName+"_invalid")
+
+ _, _, err = ReadAttributeHeader(tcert, headerKey)
+
+ // NOTE(review): same nil-err logging issue as above.
+ if err == nil {
+ t.Error(err)
+ }
+}
+
+// TestReadTCertAttributeByPosition reads position 1 directly and decrypts it.
+func TestReadTCertAttributeByPosition(t *testing.T) {
+ expected := "Software Engineer"
+
+ tcert, prek0, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+
+ encryptedAttribute, err := ReadTCertAttributeByPosition(tcert, 1)
+
+ if err != nil {
+ t.Error(err)
+ }
+
+ attributeKey := getAttributeKey(prek0, "position")
+
+ attribute, err := DecryptAttributeValue(attributeKey, encryptedAttribute)
+
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(attribute) != expected {
+ t.Errorf("Failed retrieving attribute value from TCert. Expected: %v, Actual: %v", expected, string(attribute))
+ }
+}
+
+// TestGetAttributesMetadata round-trips an AttributesMetadata message through
+// proto.Marshal and GetAttributesMetadata.
+func TestGetAttributesMetadata(t *testing.T) {
+ metadata := []byte{255, 255, 255, 255}
+ entries := make([]*pb.AttributesMetadataEntry, 1)
+ var entry pb.AttributesMetadataEntry
+ entry.AttributeName = "position"
+ entry.AttributeKey = []byte{0, 0, 0, 0}
+ entries[0] = &entry
+ attributesMetadata := pb.AttributesMetadata{Metadata: metadata, Entries: entries}
+ raw, err := proto.Marshal(&attributesMetadata)
+ if err != nil {
+ t.Error(err)
+ }
+ resultMetadata, err := GetAttributesMetadata(raw)
+ if err != nil {
+ t.Error(err)
+ }
+ if bytes.Compare(resultMetadata.Metadata, attributesMetadata.Metadata) != 0 {
+ t.Fatalf("Invalid metadata expected %v result %v", attributesMetadata.Metadata, resultMetadata.Metadata)
+ }
+ if resultMetadata.Entries[0].AttributeName != attributesMetadata.Entries[0].AttributeName {
+ t.Fatalf("Invalid first entry attribute name expected %v result %v", attributesMetadata.Entries[0].AttributeName, resultMetadata.Entries[0].AttributeName)
+ }
+ if bytes.Compare(resultMetadata.Entries[0].AttributeKey, attributesMetadata.Entries[0].AttributeKey) != 0 {
+ t.Fatalf("Invalid first entry attribute key expected %v result %v", attributesMetadata.Entries[0].AttributeKey, resultMetadata.Entries[0].AttributeKey)
+ }
+}
+
+// TestReadTCertAttributeByPosition_InvalidPositions covers an out-of-range
+// position (no such extension) and a negative position.
+func TestReadTCertAttributeByPosition_InvalidPositions(t *testing.T) {
+ tcert, _, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, err = ReadTCertAttributeByPosition(tcert, 2)
+
+ if err == nil {
+ t.Error("Test should have failed since there is no attribute in the position 2 of the TCert")
+ }
+
+ _, err = ReadTCertAttributeByPosition(tcert, -2)
+
+ if err == nil {
+ t.Error("Test should have failed since attribute positions should be positive integer values")
+ }
+}
+
+// TestCreateAttributesMetadataObjectFromCert verifies the metadata object
+// carries the original metadata plus one entry per attribute and the header entry.
+func TestCreateAttributesMetadataObjectFromCert(t *testing.T) {
+ tcert, preK0, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+
+ metadata := []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}
+ attributeKeys := []string{"position"}
+ metadataObj := CreateAttributesMetadataObjectFromCert(tcert, metadata, preK0, attributeKeys)
+ if bytes.Compare(metadataObj.Metadata, metadata) != 0 {
+ t.Errorf("Invalid metadata result %v but expected %v", metadataObj.Metadata, metadata)
+ }
+
+ // One entry for "position" plus the implicit header entry. The failure
+ // message previously claimed 3 were expected while the check requires 2.
+ entries := metadataObj.GetEntries()
+ if len(entries) != 2 {
+ t.Errorf("Invalid entries in metadata result %v but expected %v", len(entries), 2)
+ }
+
+ firstEntry := entries[0]
+ if firstEntry.AttributeName != "position" {
+ t.Errorf("Invalid first attribute name, this has to be %v but is %v", "position", firstEntry.AttributeName)
+ }
+ firstKey, err := GetKForAttribute("position", preK0, tcert)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if bytes.Compare(firstKey, firstEntry.AttributeKey) != 0 {
+ t.Errorf("Invalid K for first attribute expected %v but returned %v", firstKey, firstEntry.AttributeKey)
+ }
+}
+
+// TestCreateAttributesMetadata exercises the raw-DER entry point and checks
+// the marshaled metadata round-trips with the expected entries.
+func TestCreateAttributesMetadata(t *testing.T) {
+ tcert, preK0, err := loadTCertAndPreK0()
+
+ if err != nil {
+ t.Error(err)
+ }
+ tcertRaw := tcert.Raw
+ metadata := []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}
+ attributeKeys := []string{"position"}
+ metadataObjRaw, err := CreateAttributesMetadata(tcertRaw, metadata, preK0, attributeKeys)
+ if err != nil {
+ t.Error(err)
+ }
+
+ var metadataObj pb.AttributesMetadata
+ err = proto.Unmarshal(metadataObjRaw, &metadataObj)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if bytes.Compare(metadataObj.Metadata, metadata) != 0 {
+ t.Errorf("Invalid metadata result %v but expected %v", metadataObj.Metadata, metadata)
+ }
+
+ // One entry for "position" plus the implicit header entry. The failure
+ // message previously claimed 3 were expected while the check requires 2.
+ entries := metadataObj.GetEntries()
+ if len(entries) != 2 {
+ t.Errorf("Invalid entries in metadata result %v but expected %v", len(entries), 2)
+ }
+
+ firstEntry := entries[0]
+ if firstEntry.AttributeName != "position" {
+ t.Errorf("Invalid first attribute name, this has to be %v but is %v", "position", firstEntry.AttributeName)
+ }
+ firstKey, err := GetKForAttribute("position", preK0, tcert)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if bytes.Compare(firstKey, firstEntry.AttributeKey) != 0 {
+ t.Errorf("Invalid K for first attribute expected %v but returned %v", firstKey, firstEntry.AttributeKey)
+ }
+}
+
+// TestCreateAttributesMetadata_AttributeNotFound: metadata is still produced
+// for an attribute absent from the TCert, but key derivation for it must fail.
+func TestCreateAttributesMetadata_AttributeNotFound(t *testing.T) {
+ tcert, preK0, err := loadTCertAndPreK0()
+
+ if err != nil {
+ t.Error(err)
+ }
+ tcertRaw := tcert.Raw
+ metadata := []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}
+ attributeKeys := []string{"company"}
+ metadataObjRaw, err := CreateAttributesMetadata(tcertRaw, metadata, preK0, attributeKeys)
+ if err != nil {
+ t.Error(err)
+ }
+
+ var metadataObj pb.AttributesMetadata
+ err = proto.Unmarshal(metadataObjRaw, &metadataObj)
+ if err != nil {
+ t.Error(err)
+ }
+ if bytes.Compare(metadataObj.Metadata, metadata) != 0 {
+ t.Errorf("Invalid metadata result %v but expected %v", metadataObj.Metadata, metadata)
+ }
+
+ // One entry for "company" plus the implicit header entry (message
+ // previously said 3 while the check requires 2).
+ entries := metadataObj.GetEntries()
+ if len(entries) != 2 {
+ t.Errorf("Invalid entries in metadata result %v but expected %v", len(entries), 2)
+ }
+
+ firstEntry := entries[0]
+ if firstEntry.AttributeName != "company" {
+ t.Errorf("Invalid first attribute name, this has to be %v but is %v", "company", firstEntry.AttributeName)
+ }
+ _, err = GetKForAttribute("company", preK0, tcert)
+ if err == nil {
+ t.Fatalf("Test should fail because company is not included within the TCert.")
+ }
+}
+
+// TestCreateAttributesMetadataObjectFromCert_AttributeNotFound mirrors the
+// raw-DER variant above for the object-returning entry point.
+func TestCreateAttributesMetadataObjectFromCert_AttributeNotFound(t *testing.T) {
+ tcert, preK0, err := loadTCertAndPreK0()
+ if err != nil {
+ t.Error(err)
+ }
+
+ metadata := []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}
+ attributeKeys := []string{"company"}
+ metadataObj := CreateAttributesMetadataObjectFromCert(tcert, metadata, preK0, attributeKeys)
+ if bytes.Compare(metadataObj.Metadata, metadata) != 0 {
+ t.Errorf("Invalid metadata result %v but expected %v", metadataObj.Metadata, metadata)
+ }
+
+ // One entry for "company" plus the implicit header entry (message
+ // previously said 3 while the check requires 2).
+ entries := metadataObj.GetEntries()
+ if len(entries) != 2 {
+ t.Errorf("Invalid entries in metadata result %v but expected %v", len(entries), 2)
+ }
+
+ firstEntry := entries[0]
+ if firstEntry.AttributeName != "company" {
+ t.Errorf("Invalid first attribute name, this has to be %v but is %v", "company", firstEntry.AttributeName)
+ }
+ _, err = GetKForAttribute("company", preK0, tcert)
+ if err == nil {
+ t.Fatalf("Test should fail because company is not included within the TCert.")
+ }
+}
+
+// TestBuildAttributesHeader checks the built header contains the prefix and
+// each name->position pair (order is unspecified, so Contains is used).
+func TestBuildAttributesHeader(t *testing.T) {
+ attributes := make(map[string]int)
+ attributes["company"] = 0
+ attributes["position"] = 1
+ attributes["country"] = 2
+ result, err := BuildAttributesHeader(attributes)
+ if err != nil {
+ t.Error(err)
+ }
+
+ resultStr := string(result)
+
+ if !strings.HasPrefix(resultStr, headerPrefix) {
+ t.Fatalf("Invalid header prefix expected %v result %v", headerPrefix, resultStr)
+ }
+
+ // Failure messages previously misspelled "should" as "shoud".
+ if !strings.Contains(resultStr, "company->0#") {
+ t.Fatalf("Invalid header should include '%v'", "company->0#")
+ }
+
+ if !strings.Contains(resultStr, "position->1#") {
+ t.Fatalf("Invalid header should include '%v'", "position->1#")
+ }
+
+ if !strings.Contains(resultStr, "country->2#") {
+ t.Fatalf("Invalid header should include '%v'", "country->2#")
+ }
+}
+
+// TestBuildAttributesHeader_DuplicatedPosition: two attributes sharing a
+// position must be rejected.
+func TestBuildAttributesHeader_DuplicatedPosition(t *testing.T) {
+ attributes := make(map[string]int)
+ attributes["company"] = 0
+ attributes["position"] = 0
+ attributes["country"] = 1
+ _, err := BuildAttributesHeader(attributes)
+ if err == nil {
+ t.Fatalf("Error this tests should fail because header has two attributes with the same position")
+ }
+}
+
+// loadTCertAndPreK0 loads the TCert (PEM) and preK0 fixtures from
+// test_resources and parses the certificate.
+func loadTCertAndPreK0() (*x509.Certificate, []byte, error) {
+ preKey0, err := ioutil.ReadFile("./test_resources/prek0.dump")
+ if err != nil {
+ return nil, nil, err
+ }
+
+ tcertRaw, err := ioutil.ReadFile("./test_resources/tcert.dump")
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // pem.Decode returns a nil block when no PEM data is found; without this
+ // guard the dereference below would panic on a malformed fixture.
+ tcertDecoded, _ := pem.Decode(tcertRaw)
+ if tcertDecoded == nil {
+ return nil, nil, fmt.Errorf("failed to PEM-decode ./test_resources/tcert.dump")
+ }
+
+ tcert, err := x509.ParseCertificate(tcertDecoded.Bytes)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return tcert, preKey0, nil
+}
diff --git a/core/crypto/attributes/proto/attributes.pb.go b/core/crypto/attributes/proto/attributes.pb.go
new file mode 100644
index 00000000000..57d3ac39c91
--- /dev/null
+++ b/core/crypto/attributes/proto/attributes.pb.go
@@ -0,0 +1,53 @@
+// Code generated by protoc-gen-go.
+// source: crypto/attributes/proto/attributes.proto
+// DO NOT EDIT!
+
+/*
+Package protos is a generated protocol buffer package.
+
+It is generated from these files:
+ crypto/attributes/proto/attributes.proto
+
+It has these top-level messages:
+ AttributesMetadataEntry
+ AttributesMetadata
+*/
+package protos
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// AttributesMetadataEntry is an entry within the metadata that store an attribute name with its respective key.
+type AttributesMetadataEntry struct {
+ AttributeName string `protobuf:"bytes,1,opt,name=AttributeName" json:"AttributeName,omitempty"`
+ AttributeKey []byte `protobuf:"bytes,2,opt,name=AttributeKey,proto3" json:"AttributeKey,omitempty"`
+}
+
+func (m *AttributesMetadataEntry) Reset() { *m = AttributesMetadataEntry{} }
+func (m *AttributesMetadataEntry) String() string { return proto.CompactTextString(m) }
+func (*AttributesMetadataEntry) ProtoMessage() {}
+
+// AttributesMetadata holds both the original metadata bytes and the metadata required to access attributes.
+type AttributesMetadata struct {
+ // Original metadata bytes
+ Metadata []byte `protobuf:"bytes,1,opt,name=Metadata,proto3" json:"Metadata,omitempty"`
+ // Entries for each attributes considered.
+ Entries []*AttributesMetadataEntry `protobuf:"bytes,2,rep,name=Entries" json:"Entries,omitempty"`
+}
+
+func (m *AttributesMetadata) Reset() { *m = AttributesMetadata{} }
+func (m *AttributesMetadata) String() string { return proto.CompactTextString(m) }
+func (*AttributesMetadata) ProtoMessage() {}
+
+func (m *AttributesMetadata) GetEntries() []*AttributesMetadataEntry {
+ if m != nil {
+ return m.Entries
+ }
+ return nil
+}
diff --git a/core/crypto/attributes/proto/attributes.proto b/core/crypto/attributes/proto/attributes.proto
new file mode 100644
index 00000000000..69ed556b699
--- /dev/null
+++ b/core/crypto/attributes/proto/attributes.proto
@@ -0,0 +1,33 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+syntax = "proto3";
+
+package protos;
+
+//AttributesMetadataEntry is an entry within the metadata that stores an attribute name with its respective key.
+message AttributesMetadataEntry {
+ string AttributeName = 1;
+ bytes AttributeKey = 2;
+}
+
+//AttributesMetadata holds both the original metadata bytes and the metadata required to access attributes.
+message AttributesMetadata {
+ //Original metadata bytes
+ bytes Metadata = 1;
+ //Entries for each attribute considered.
+ repeated AttributesMetadataEntry Entries = 2;
+}
diff --git a/core/crypto/attributes/test_resources/prek0.dump b/core/crypto/attributes/test_resources/prek0.dump
new file mode 100644
index 00000000000..442dfea6ca2
--- /dev/null
+++ b/core/crypto/attributes/test_resources/prek0.dump
@@ -0,0 +1 @@
+ýA>ø0`ñËÕ›=)»á*¨®g&V'ù aÅàkâJ
\ No newline at end of file
diff --git a/core/crypto/attributes/test_resources/tcert.dump b/core/crypto/attributes/test_resources/tcert.dump
new file mode 100644
index 00000000000..95c9862125f
--- /dev/null
+++ b/core/crypto/attributes/test_resources/tcert.dump
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIIC2TCCAn+gAwIBAgIQBq019NJETm+JnM9pUFY5fDAKBggqhkjOPQQDAzAxMQsw
+CQYDVQQGEwJVUzEUMBIGA1UEChMLSHlwZXJsZWRnZXIxDDAKBgNVBAMTA3RjYTAe
+Fw0xNjA1MjYxODU4MjBaFw0xNjA4MjQxODU4MjBaMDMxCzAJBgNVBAYTAlVTMRQw
+EgYDVQQKEwtIeXBlcmxlZGdlcjEOMAwGA1UEAxMFZGllZ28wWTATBgcqhkjOPQIB
+BggqhkjOPQMBBwNCAARv37sk17/Yq6Ata16fj1CacV3uvYzCwgWx2hearwfEBbEh
++lfmQXYUEu7pcaEaPh+9dNVEeqyHWGqFu2mo5tufo4IBdTCCAXEwDgYDVR0PAQH/
+BAQDAgeAMAwGA1UdEwEB/wQCMAAwDQYDVR0OBAYEBAECAwQwDwYDVR0jBAgwBoAE
+AQIDBDBKBgYqAwQFBgoEQLb0qP6OU62xQQewx5PhpJu9cnLp54+aOpSptE78GNyd
+vh7xNtSHD1GTouEK2RFx1YwJZHAM7OS48JTDZoPr1L4wTQYGKgMEBQYHAQH/BEDv
+kl2JEP0f8OQRch84Hd3o3oGJbOrUBA5eYP0TaQLzpbGop4Z3Uun2Iyllfhixr+Gq
+2Xv1vuSDsbNDpObuQthJMEoGBioDBAUGCARALLtd0x7G/yc2WSSo6ag0nntWayud
+kaIW7NOJiWGJaOFtP+fufzIUPzYvBAuQIk3nYeOLBH/948ZyKsJQWW/LtzBKBgYq
+AwQFBgkEQAXgSabQSC5xHa/YXQi7nlStN81eiG/VhGfTeLvkHXPMDcULAGyKHtax
+l2IaAap9QetXi6pkN78lO048IhFTFCswCgYIKoZIzj0EAwMDSAAwRQIhAMSY4g4E
+hWh7Ey4sOpPYfJwfM82nZHboLEUzrWFwuZ+KAiBf2V0OoXPt2I2MaV1+2OQIaHcJ
+BF8oB65Ox67VENMNUg==
+-----END CERTIFICATE-----
diff --git a/core/crypto/client.go b/core/crypto/client.go
new file mode 100644
index 00000000000..07ee25e5a7f
--- /dev/null
+++ b/core/crypto/client.go
@@ -0,0 +1,160 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "sync"
+
+ "github.com/hyperledger/fabric/core/crypto/utils"
+)
+
+// Private Variables
+
+type clientEntry struct {
+ client Client
+ counter int64
+}
+
+var (
+ // Map of initialized clients
+ clients = make(map[string]clientEntry)
+
+ // Sync
+ clientMutex sync.Mutex
+)
+
+// Public Methods
+
+// RegisterClient registers a client to the PKI infrastructure
+func RegisterClient(name string, pwd []byte, enrollID, enrollPWD string) error {
+ clientMutex.Lock()
+ defer clientMutex.Unlock()
+
+ log.Infof("Registering client [%s] with name [%s]...", enrollID, name)
+
+ if _, ok := clients[name]; ok {
+ log.Infof("Registering client [%s] with name [%s]...done. Already initialized.", enrollID, name)
+
+ return nil
+ }
+
+ client := newClient()
+ if err := client.register(name, pwd, enrollID, enrollPWD); err != nil {
+ if err != utils.ErrAlreadyRegistered && err != utils.ErrAlreadyInitialized {
+ log.Errorf("Failed registering client [%s] with name [%s] [%s].", enrollID, name, err)
+ return err
+ }
+ log.Infof("Registering client [%s] with name [%s]...done. Already registered or initiliazed.", enrollID, name)
+ }
+ err := client.close()
+ if err != nil {
+ // It is not necessary to report this error to the caller
+ log.Warningf("Registering client [%s] with name [%s]. Failed closing [%s].", enrollID, name, err)
+ }
+
+ log.Infof("Registering client [%s] with name [%s]...done!", enrollID, name)
+
+ return nil
+}
+
+// InitClient initializes a client named name with password pwd
+func InitClient(name string, pwd []byte) (Client, error) {
+ clientMutex.Lock()
+ defer clientMutex.Unlock()
+
+ log.Infof("Initializing client [%s]...", name)
+
+ if entry, ok := clients[name]; ok {
+ log.Infof("Client already initiliazied [%s]. Increasing counter from [%d]", name, clients[name].counter)
+ entry.counter++
+ clients[name] = entry
+
+ return clients[name].client, nil
+ }
+
+ client := newClient()
+ if err := client.init(name, pwd); err != nil {
+ log.Errorf("Failed client initialization [%s]: [%s].", name, err)
+
+ return nil, err
+ }
+
+ clients[name] = clientEntry{client, 1}
+ log.Infof("Initializing client [%s]...done!", name)
+
+ return client, nil
+}
+
+// CloseClient releases all the resources allocated by clients
+func CloseClient(client Client) error {
+ clientMutex.Lock()
+ defer clientMutex.Unlock()
+
+ return closeClientInternal(client, false)
+}
+
+// CloseAllClients closes all the clients initialized so far
+func CloseAllClients() (bool, []error) {
+ clientMutex.Lock()
+ defer clientMutex.Unlock()
+
+ log.Info("Closing all clients...")
+
+ errs := make([]error, len(clients))
+ for _, value := range clients {
+ err := closeClientInternal(value.client, true)
+
+ errs = append(errs, err)
+ }
+
+ log.Info("Closing all clients...done!")
+
+ return len(errs) != 0, errs
+}
+
+// Private Methods
+
+func newClient() *clientImpl {
+ return &clientImpl{&nodeImpl{}, nil, nil, nil, nil}
+}
+
+func closeClientInternal(client Client, force bool) error {
+ if client == nil {
+ return utils.ErrNilArgument
+ }
+
+ name := client.GetName()
+ log.Infof("Closing client [%s]...", name)
+ entry, ok := clients[name]
+ if !ok {
+ return utils.ErrInvalidReference
+ }
+ if entry.counter == 1 || force {
+ defer delete(clients, name)
+ err := clients[name].client.(*clientImpl).close()
+ log.Debugf("Closing client [%s]...cleanup! [%s].", name, utils.ErrToString(err))
+
+ return err
+ }
+
+ // decrease counter
+ entry.counter--
+ clients[name] = entry
+ log.Debugf("Closing client [%s]...decreased counter at [%d].", name, clients[name].counter)
+
+ return nil
+}
diff --git a/core/crypto/client_confidentiality.go b/core/crypto/client_confidentiality.go
new file mode 100644
index 00000000000..20552ec5495
--- /dev/null
+++ b/core/crypto/client_confidentiality.go
@@ -0,0 +1,172 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "crypto/rand"
+ "encoding/asn1"
+ "errors"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+ obc "github.com/hyperledger/fabric/protos"
+)
+
+func (client *clientImpl) encryptTx(tx *obc.Transaction) error {
+
+ if len(tx.Nonce) == 0 {
+ return errors.New("Failed encrypting payload. Invalid nonce.")
+ }
+
+ client.Debugf("Confidentiality protocol version [%s]", tx.ConfidentialityProtocolVersion)
+ switch tx.ConfidentialityProtocolVersion {
+ case "1.2":
+ client.Debug("Using confidentiality protocol version 1.2")
+ return client.encryptTxVersion1_2(tx)
+ }
+
+ return utils.ErrInvalidProtocolVersion
+}
+
+// chainCodeValidatorMessage1_2 represents a message to validators
+type chainCodeValidatorMessage1_2 struct {
+ PrivateKey []byte
+ StateKey []byte
+}
+
+func (client *clientImpl) encryptTxVersion1_2(tx *obc.Transaction) error {
+ // Create (PK_C,SK_C) pair
+ ccPrivateKey, err := client.eciesSPI.NewPrivateKey(rand.Reader, primitives.GetDefaultCurve())
+ if err != nil {
+ client.Errorf("Failed generate chaincode keypair: [%s]", err)
+
+ return err
+ }
+
+ // Prepare message to the validators
+ var (
+ stateKey []byte
+ privBytes []byte
+ )
+
+ switch tx.Type {
+ case obc.Transaction_CHAINCODE_DEPLOY:
+ // Prepare chaincode stateKey and privateKey
+ stateKey, err = primitives.GenAESKey()
+ if err != nil {
+ client.Errorf("Failed creating state key: [%s]", err)
+
+ return err
+ }
+
+ privBytes, err = client.eciesSPI.SerializePrivateKey(ccPrivateKey)
+ if err != nil {
+ client.Errorf("Failed serializing chaincode key: [%s]", err)
+
+ return err
+ }
+
+ break
+ case obc.Transaction_CHAINCODE_QUERY:
+ // Prepare chaincode stateKey and privateKey
+ stateKey = primitives.HMACAESTruncated(client.queryStateKey, append([]byte{6}, tx.Nonce...))
+
+ privBytes, err = client.eciesSPI.SerializePrivateKey(ccPrivateKey)
+ if err != nil {
+ client.Errorf("Failed serializing chaincode key: [%s]", err)
+
+ return err
+ }
+
+ break
+ case obc.Transaction_CHAINCODE_INVOKE:
+ // Prepare chaincode stateKey and privateKey
+ stateKey = make([]byte, 0)
+
+ privBytes, err = client.eciesSPI.SerializePrivateKey(ccPrivateKey)
+ if err != nil {
+ client.Errorf("Failed serializing chaincode key: [%s]", err)
+
+ return err
+ }
+ break
+ }
+
+ // Encrypt message to the validators
+ cipher, err := client.eciesSPI.NewAsymmetricCipherFromPublicKey(client.chainPublicKey)
+ if err != nil {
+ client.Errorf("Failed creating new encryption scheme: [%s]", err)
+
+ return err
+ }
+
+ msgToValidators, err := asn1.Marshal(chainCodeValidatorMessage1_2{privBytes, stateKey})
+ if err != nil {
+ client.Errorf("Failed preparing message to the validators: [%s]", err)
+
+ return err
+ }
+
+ encMsgToValidators, err := cipher.Process(msgToValidators)
+ if err != nil {
+ client.Errorf("Failed encrypting message to the validators: [%s]", err)
+
+ return err
+ }
+ tx.ToValidators = encMsgToValidators
+
+ // Encrypt the rest of the fields
+
+ // Init with chaincode pk
+ cipher, err = client.eciesSPI.NewAsymmetricCipherFromPublicKey(ccPrivateKey.GetPublicKey())
+ if err != nil {
+ client.Errorf("Failed initiliazing encryption scheme: [%s]", err)
+
+ return err
+ }
+
+ // Encrypt chaincodeID using pkC
+ encryptedChaincodeID, err := cipher.Process(tx.ChaincodeID)
+ if err != nil {
+ client.Errorf("Failed encrypting chaincodeID: [%s]", err)
+
+ return err
+ }
+ tx.ChaincodeID = encryptedChaincodeID
+
+ // Encrypt payload using pkC
+ encryptedPayload, err := cipher.Process(tx.Payload)
+ if err != nil {
+ client.Errorf("Failed encrypting payload: [%s]", err)
+
+ return err
+ }
+ tx.Payload = encryptedPayload
+
+ // Encrypt metadata using pkC
+ if len(tx.Metadata) != 0 {
+ encryptedMetadata, err := cipher.Process(tx.Metadata)
+ if err != nil {
+ client.Errorf("Failed encrypting metadata: [%s]", err)
+
+ return err
+ }
+ tx.Metadata = encryptedMetadata
+ }
+
+ return nil
+}
diff --git a/core/crypto/client_crypto.go b/core/crypto/client_crypto.go
new file mode 100644
index 00000000000..94f6132ddea
--- /dev/null
+++ b/core/crypto/client_crypto.go
@@ -0,0 +1,61 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "crypto/ecdsa"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+)
+
+func (client *clientImpl) registerCryptoEngine() (err error) {
+ // Store query state key
+ client.queryStateKey, err = primitives.GetRandomNonce()
+ if err != nil {
+ log.Errorf("Failed generating query state key: [%s].", err.Error())
+ return
+ }
+
+ err = client.ks.storeKey(client.conf.getQueryStateKeyFilename(), client.queryStateKey)
+ if err != nil {
+ log.Errorf("Failed storing query state key: [%s].", err.Error())
+ return
+ }
+
+ return
+}
+
+func (client *clientImpl) initCryptoEngine() (err error) {
+ // Load TCertOwnerKDFKey
+ if err = client.initTCertEngine(); err != nil {
+ return
+ }
+
+ // Init query state key
+ client.queryStateKey, err = client.ks.loadKey(client.conf.getQueryStateKeyFilename())
+ if err != nil {
+ return
+ }
+
+ // Init chain publicKey
+ client.chainPublicKey, err = client.eciesSPI.NewPublicKey(nil, client.enrollChainKey.(*ecdsa.PublicKey))
+ if err != nil {
+ return
+ }
+
+ return
+}
diff --git a/core/crypto/client_ecert_handler.go b/core/crypto/client_ecert_handler.go
new file mode 100644
index 00000000000..bec964c2a2e
--- /dev/null
+++ b/core/crypto/client_ecert_handler.go
@@ -0,0 +1,115 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+ obc "github.com/hyperledger/fabric/protos"
+)
+
+type eCertHandlerImpl struct {
+ client *clientImpl
+}
+
+type eCertTransactionHandlerImpl struct {
+ client *clientImpl
+
+ nonce []byte
+ binding []byte
+}
+
+func (handler *eCertHandlerImpl) init(client *clientImpl) error {
+ handler.client = client
+
+ return nil
+}
+
+// GetCertificate returns the enrollment certificate (ECert) DER
+func (handler *eCertHandlerImpl) GetCertificate() []byte {
+ return utils.Clone(handler.client.enrollCert.Raw)
+}
+
+// Sign signs msg using the signing key corresponding to this ECert
+func (handler *eCertHandlerImpl) Sign(msg []byte) ([]byte, error) {
+ return handler.client.signWithEnrollmentKey(msg)
+}
+
+// Verify verifies msg using the verifying key corresponding to this ECert
+func (handler *eCertHandlerImpl) Verify(signature []byte, msg []byte) error {
+ ok, err := handler.client.verifyWithEnrollmentCert(msg, signature)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return utils.ErrInvalidSignature
+ }
+ return nil
+}
+
+// GetTransactionHandler returns the transaction handler relative to this certificate
+func (handler *eCertHandlerImpl) GetTransactionHandler() (TransactionHandler, error) {
+ txHandler := &eCertTransactionHandlerImpl{}
+ err := txHandler.init(handler.client)
+ if err != nil {
+ handler.client.Errorf("Failed getting transaction handler [%s]", err)
+
+ return nil, err
+ }
+
+ return txHandler, nil
+}
+
+func (handler *eCertTransactionHandlerImpl) init(client *clientImpl) error {
+ nonce, err := client.createTransactionNonce()
+ if err != nil {
+ client.Errorf("Failed initiliazing transaction handler [%s]", err)
+
+ return err
+ }
+
+ handler.client = client
+ handler.nonce = nonce
+ handler.binding = primitives.Hash(append(handler.client.enrollCert.Raw, handler.nonce...))
+
+ return nil
+}
+
+// GetCertificateHandler returns the certificate handler relative to the certificate mapped to this transaction
+func (handler *eCertTransactionHandlerImpl) GetCertificateHandler() (CertificateHandler, error) {
+ return handler.client.GetEnrollmentCertificateHandler()
+}
+
+// GetBinding returns a Binding to the underlying transaction layer
+func (handler *eCertTransactionHandlerImpl) GetBinding() ([]byte, error) {
+ return utils.Clone(handler.binding), nil
+}
+
+// NewChaincodeDeployTransaction is used to deploy chaincode.
+func (handler *eCertTransactionHandlerImpl) NewChaincodeDeployTransaction(chaincodeDeploymentSpec *obc.ChaincodeDeploymentSpec, uuid string, attributeNames ...string) (*obc.Transaction, error) {
+ return handler.client.newChaincodeDeployUsingECert(chaincodeDeploymentSpec, uuid, handler.nonce)
+}
+
+// NewChaincodeExecute is used to execute chaincode's functions.
+func (handler *eCertTransactionHandlerImpl) NewChaincodeExecute(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, attributeNames ...string) (*obc.Transaction, error) {
+ return handler.client.newChaincodeExecuteUsingECert(chaincodeInvocation, uuid, handler.nonce)
+}
+
+// NewChaincodeQuery is used to query chaincode's functions.
+func (handler *eCertTransactionHandlerImpl) NewChaincodeQuery(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, attributeNames ...string) (*obc.Transaction, error) {
+ return handler.client.newChaincodeQueryUsingECert(chaincodeInvocation, uuid, handler.nonce)
+}
diff --git a/core/crypto/client_impl.go b/core/crypto/client_impl.go
new file mode 100644
index 00000000000..7f3f5751bd7
--- /dev/null
+++ b/core/crypto/client_impl.go
@@ -0,0 +1,270 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "errors"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+ obc "github.com/hyperledger/fabric/protos"
+)
+
+type clientImpl struct {
+ *nodeImpl
+
+ // Chain
+ chainPublicKey primitives.PublicKey
+ queryStateKey []byte
+
+ // TCA KDFKey
+ tCertOwnerKDFKey []byte
+ tCertPool tCertPool
+}
+
+// NewChaincodeDeployTransaction is used to deploy chaincode.
+func (client *clientImpl) NewChaincodeDeployTransaction(chaincodeDeploymentSpec *obc.ChaincodeDeploymentSpec, uuid string, attributes ...string) (*obc.Transaction, error) {
+ // Verify that the client is initialized
+ if !client.IsInitialized() {
+ return nil, utils.ErrNotInitialized
+ }
+
+ // Get next available (not yet used) transaction certificate
+ tCerts, err := client.tCertPool.GetNextTCerts(1, attributes...)
+ if err != nil {
+ client.Errorf("Failed to obtain a (not yet used) TCert for Chaincode Deploy[%s].", err.Error())
+ return nil, err
+ }
+
+ if len(tCerts) != 1 {
+ client.Error("Failed to obtain a (not yet used) TCert.")
+ return nil, errors.New("Failed to obtain a TCert for Chaincode Deploy Transaction using TCert. Expected exactly one returned TCert.")
+ }
+
+ // Create Transaction
+ return client.newChaincodeDeployUsingTCert(chaincodeDeploymentSpec, uuid, attributes, tCerts[0].tCert, nil)
+}
+
+// GetNextTCerts gets the next available (not yet used) transaction certificates.
+func (client *clientImpl) GetNextTCerts(nCerts int, attributes ...string) (tCerts []tCert, err error) {
+ if nCerts < 1 {
+ return nil, errors.New("Number of requested TCerts has to be positive!")
+ }
+
+ // Verify that the client is initialized
+ if !client.IsInitialized() {
+ return nil, utils.ErrNotInitialized
+ }
+
+ // Get next available (not yet used) transaction certificate
+ tBlocks, err := client.tCertPool.GetNextTCerts(nCerts, attributes...)
+ if err != nil {
+ client.Errorf("Failed getting [%d] (not yet used) Transaction Certificates (TCerts) [%s].", nCerts, err.Error())
+ return nil, err
+ }
+ tCerts = make([]tCert, len(tBlocks))
+ for i, eachBlock := range tBlocks {
+ tCerts[i] = eachBlock.tCert
+ }
+ return tCerts, nil
+}
+
+// NewChaincodeExecute is used to execute chaincode's functions.
+func (client *clientImpl) NewChaincodeExecute(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, attributes ...string) (*obc.Transaction, error) {
+ // Verify that the client is initialized
+ if !client.IsInitialized() {
+ return nil, utils.ErrNotInitialized
+ }
+
+ // Get next available (not yet used) transaction certificate
+ tBlocks, err := client.tCertPool.GetNextTCerts(1, attributes...)
+ if err != nil {
+ client.Errorf("Failed to obtain a (not yet used) TCert [%s].", err.Error())
+ return nil, err
+ }
+
+ if len(tBlocks) != 1 {
+ client.Error("Failed to obtain a (not yet used) TCert.")
+ return nil, errors.New("Failed to obtain a TCert for Chaincode Execution. Expected exactly one returned TCert.")
+ }
+
+ // Create Transaction
+ return client.newChaincodeExecuteUsingTCert(chaincodeInvocation, uuid, attributes, tBlocks[0].tCert, nil)
+}
+
+// NewChaincodeQuery is used to query chaincode's functions.
+func (client *clientImpl) NewChaincodeQuery(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, attributes ...string) (*obc.Transaction, error) {
+ // Verify that the client is initialized
+ if !client.IsInitialized() {
+ return nil, utils.ErrNotInitialized
+ }
+
+ // Get next available (not yet used) transaction certificate
+ tBlocks, err := client.tCertPool.GetNextTCerts(1, attributes...)
+ if err != nil {
+ client.Errorf("Failed to obtain a (not yet used) TCert [%s].", err.Error())
+ return nil, err
+ }
+
+ if len(tBlocks) != 1 {
+ client.Error("Failed to obtain a (not yet used) TCert.")
+ return nil, errors.New("Failed to obtain a TCert for Chaincode Invocation. Expected exactly one returned TCert.")
+ }
+
+ // Create Transaction
+ return client.newChaincodeQueryUsingTCert(chaincodeInvocation, uuid, attributes, tBlocks[0].tCert, nil)
+}
+
+// GetEnrollmentCertificateHandler returns a CertificateHandler whose certificate is the enrollment certificate
+func (client *clientImpl) GetEnrollmentCertificateHandler() (CertificateHandler, error) {
+ // Verify that the client is initialized
+ if !client.IsInitialized() {
+ return nil, utils.ErrNotInitialized
+ }
+
+ // Return the handler
+ handler := &eCertHandlerImpl{}
+ err := handler.init(client)
+ if err != nil {
+ client.Errorf("Failed getting handler [%s].", err.Error())
+ return nil, err
+ }
+
+ return handler, nil
+}
+
+// GetTCertificateHandlerNext returns a CertificateHandler whose certificate is the next available TCert
+func (client *clientImpl) GetTCertificateHandlerNext(attributes ...string) (CertificateHandler, error) {
+ // Verify that the client is initialized
+ if !client.IsInitialized() {
+ return nil, utils.ErrNotInitialized
+ }
+
+ // Get next TCert
+ tBlocks, err := client.tCertPool.GetNextTCerts(1, attributes...)
+ if err != nil {
+ client.Errorf("Failed to obtain a (not yet used) TCert for creating a CertificateHandler [%s].", err.Error())
+ return nil, err
+ }
+
+ if len(tBlocks) != 1 {
+ client.Error("Failed to obtain a TCert for creating a CertificateHandler.")
+ return nil, errors.New("Failed to obtain a TCert for creating a CertificateHandler")
+ }
+
+ // Return the handler
+ handler := &tCertHandlerImpl{}
+ err = handler.init(client, tBlocks[0].tCert)
+ if err != nil {
+ client.Errorf("Failed getting handler [%s].", err.Error())
+ return nil, err
+ }
+
+ return handler, nil
+}
+
+// GetTCertificateHandlerFromDER returns a CertificateHandler whose certificate is the one passed
+func (client *clientImpl) GetTCertificateHandlerFromDER(tCertDER []byte) (CertificateHandler, error) {
+ // Verify that the client is initialized
+ if !client.IsInitialized() {
+ return nil, utils.ErrNotInitialized
+ }
+
+ // Validate the transaction certificate
+ tCert, err := client.getTCertFromExternalDER(tCertDER)
+ if err != nil {
+ client.Warningf("Failed validating transaction certificate [%s].", err)
+
+ return nil, err
+ }
+
+ // Return the handler
+ handler := &tCertHandlerImpl{}
+ err = handler.init(client, tCert)
+ if err != nil {
+ client.Errorf("Failed getting handler [%s].", err.Error())
+ return nil, err
+ }
+
+ return handler, nil
+}
+
+func (client *clientImpl) register(id string, pwd []byte, enrollID, enrollPWD string) (err error) {
+
+ clentRegFunc := func(eType NodeType, name string, pwd []byte, enrollID, enrollPWD string) error {
+ client.Info("Register crypto engine...")
+ err = client.registerCryptoEngine()
+ if err != nil {
+ client.Errorf("Failed registering crypto engine [%s]: [%s].", enrollID, err.Error())
+ return nil
+ }
+ client.Info("Register crypto engine...done.")
+ return nil
+ }
+
+ if err = client.nodeImpl.register(NodeClient, id, pwd, enrollID, enrollPWD, clentRegFunc); err != nil {
+ client.Errorf("Failed registering client [%s]: [%s]", enrollID, err)
+ return err
+ }
+
+ return nil
+}
+
+func (client *clientImpl) init(id string, pwd []byte) error {
+
+ clientInitFunc := func(eType NodeType, name string, pwd []byte) error {
+ // Initialize keystore
+ client.Debug("Init keystore...")
+ err := client.initKeyStore()
+ if err != nil {
+ if err != utils.ErrKeyStoreAlreadyInitialized {
+ client.Error("Keystore already initialized.")
+ } else {
+ client.Errorf("Failed initiliazing keystore [%s].", err.Error())
+
+ return err
+ }
+ }
+ client.Debug("Init keystore...done.")
+
+ // Init crypto engine
+ err = client.initCryptoEngine()
+ if err != nil {
+ client.Errorf("Failed initiliazing crypto engine [%s].", err.Error())
+ return err
+ }
+ return nil
+ }
+
+ if err := client.nodeImpl.init(NodeClient, id, pwd, clientInitFunc); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (client *clientImpl) close() (err error) {
+ if client.tCertPool != nil {
+ if err = client.tCertPool.Stop(); err != nil {
+ client.Errorf("Failed closing TCertPool [%s]", err)
+ }
+ }
+
+ if err = client.nodeImpl.close(); err != nil {
+ client.Errorf("Failed closing node [%s]", err)
+ }
+ return
+}
diff --git a/core/crypto/client_ks.go b/core/crypto/client_ks.go
new file mode 100644
index 00000000000..8905049e089
--- /dev/null
+++ b/core/crypto/client_ks.go
@@ -0,0 +1,203 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "database/sql"
+ "os"
+)
+
+func (client *clientImpl) initKeyStore() error {
+ // Create TCerts directory
+ os.MkdirAll(client.conf.getTCertsPath(), 0755)
+
+ // create tables
+ client.Debugf("Create Table if not exists [TCert] at [%s].", client.conf.getKeyStorePath())
+ if _, err := client.ks.sqlDB.Exec("CREATE TABLE IF NOT EXISTS TCerts (id INTEGER, attrhash VARCHAR, cert BLOB, prkz BLOB, PRIMARY KEY (id))"); err != nil {
+ client.Errorf("Failed creating table [%s].", err)
+ return err
+ }
+
+ client.Debugf("Create Table if not exists [UsedTCert] at [%s].", client.conf.getKeyStorePath())
+ if _, err := client.ks.sqlDB.Exec("CREATE TABLE IF NOT EXISTS UsedTCert (id INTEGER, attrhash VARCHAR, cert BLOB, prkz BLOB, PRIMARY KEY (id))"); err != nil {
+ client.Errorf("Failed creating table [%s].", err)
+ return err
+ }
+
+ return nil
+}
+
+func (ks *keyStore) storeUsedTCert(tCertBlck *TCertBlock) (err error) {
+ ks.m.Lock()
+ defer ks.m.Unlock()
+
+ ks.node.Debug("Storing used TCert...")
+
+ // Open transaction
+ tx, err := ks.sqlDB.Begin()
+ if err != nil {
+ ks.node.Errorf("Failed beginning transaction [%s].", err)
+
+ return
+ }
+
+ // Insert into UsedTCert
+ if _, err = tx.Exec("INSERT INTO UsedTCert (attrhash, cert, prkz) VALUES (?, ?, ?)", tCertBlck.attributesHash, tCertBlck.tCert.GetCertificate().Raw, tCertBlck.tCert.GetPreK0()); err != nil {
+ ks.node.Errorf("Failed inserting TCert to UsedTCert: [%s].", err)
+
+ tx.Rollback()
+
+ return
+ }
+
+ // Finalize
+ err = tx.Commit()
+ if err != nil {
+ ks.node.Errorf("Failed commiting [%s].", err)
+ tx.Rollback()
+
+ return
+ }
+
+ ks.node.Debug("Storing used TCert...done!")
+
+ //name, err := utils.TempFile(ks.conf.getTCertsPath(), "tcert_")
+ //if err != nil {
+ // ks.node.error("Failed storing TCert: [%s]", err)
+ // return
+ //}
+ //
+ //err = ioutil.WriteFile(name, tCert.GetCertificate().Raw, 0700)
+ //if err != nil {
+ // ks.node.error("Failed storing TCert: [%s]", err)
+ // return
+ //}
+
+ return
+}
+
+func (ks *keyStore) storeUnusedTCerts(tCertBlocks []*TCertBlock) (err error) {
+ ks.node.Debug("Storing unused TCerts...")
+
+ if len(tCertBlocks) == 0 {
+ ks.node.Debug("Empty list of unused TCerts.")
+ return
+ }
+
+ // Open transaction
+ tx, err := ks.sqlDB.Begin()
+ if err != nil {
+ ks.node.Errorf("Failed beginning transaction [%s].", err)
+
+ return
+ }
+
+ for _, tCertBlck := range tCertBlocks {
+ // Insert into UsedTCert
+ if _, err = tx.Exec("INSERT INTO TCerts (attrhash, cert, prkz) VALUES (?, ?, ?)", tCertBlck.attributesHash, tCertBlck.tCert.GetCertificate().Raw, tCertBlck.tCert.GetPreK0()); err != nil {
+ ks.node.Errorf("Failed inserting unused TCert to TCerts: [%s].", err)
+
+ tx.Rollback()
+
+ return
+ }
+ }
+
+ // Finalize
+ err = tx.Commit()
+ if err != nil {
+ ks.node.Errorf("Failed commiting [%s].", err)
+ tx.Rollback()
+
+ return
+ }
+
+ ks.node.Debug("Storing unused TCerts...done!")
+
+ return
+}
+
+//Used by the MT pool
+func (ks *keyStore) loadUnusedTCert() ([]byte, error) {
+ // Get the first row available
+ var id int
+ var cert []byte
+ row := ks.sqlDB.QueryRow("SELECT id, cert FROM TCerts")
+ err := row.Scan(&id, &cert)
+
+ if err == sql.ErrNoRows {
+ return nil, nil
+ } else if err != nil {
+ ks.node.Errorf("Error during select [%s].", err.Error())
+
+ return nil, err
+ }
+
+ // Remove from TCert
+ if _, err := ks.sqlDB.Exec("DELETE FROM TCerts WHERE id = ?", id); err != nil {
+ ks.node.Errorf("Failed removing row [%d] from TCert: [%s].", id, err.Error())
+
+ return nil, err
+ }
+
+ return cert, nil
+}
+
+func (ks *keyStore) loadUnusedTCerts() ([]*TCertDBBlock, error) {
+ // Get unused TCerts
+ rows, err := ks.sqlDB.Query("SELECT attrhash, cert, prkz FROM TCerts")
+ if err == sql.ErrNoRows {
+ return nil, nil
+ } else if err != nil {
+ ks.node.Errorf("Error during select [%s].", err)
+
+ return nil, err
+ }
+
+ tCertDBBlocks := []*TCertDBBlock{}
+
+ for {
+ if rows.Next() {
+ var tCertDER []byte
+ var attributeHash string
+ var prek0 []byte
+ if err := rows.Scan(&attributeHash, &tCertDER, &prek0); err != nil {
+ ks.node.Errorf("Error during scan [%s].", err)
+
+ continue
+ }
+
+ var tCertBlk = new(TCertDBBlock)
+ tCertBlk.attributesHash = attributeHash
+ tCertBlk.preK0 = prek0
+ tCertBlk.tCertDER = tCertDER
+
+ tCertDBBlocks = append(tCertDBBlocks, tCertBlk)
+ } else {
+ break
+ }
+ }
+
+ // Delete all entries
+ if _, err = ks.sqlDB.Exec("DELETE FROM TCerts"); err != nil {
+ ks.node.Errorf("Failed cleaning up unused TCert entries: [%s].", err)
+
+ return nil, err
+ }
+
+ return tCertDBBlocks, nil
+}
diff --git a/core/crypto/client_state.go b/core/crypto/client_state.go
new file mode 100644
index 00000000000..db7e98be780
--- /dev/null
+++ b/core/crypto/client_state.go
@@ -0,0 +1,65 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+ obc "github.com/hyperledger/fabric/protos"
+)
+
+// DecryptQueryResult is used to decrypt the result of a query transaction
+func (client *clientImpl) DecryptQueryResult(queryTx *obc.Transaction, ct []byte) ([]byte, error) {
+ // Verify that the client is initialized
+ if !client.isInitialized {
+ return nil, utils.ErrNotInitialized
+ }
+
+ var queryKey []byte
+
+ switch queryTx.ConfidentialityProtocolVersion {
+ case "1.2":
+ queryKey = primitives.HMACAESTruncated(client.queryStateKey, append([]byte{6}, queryTx.Nonce...))
+ }
+
+ if len(ct) <= primitives.NonceSize {
+ return nil, utils.ErrDecrypt
+ }
+
+ c, err := aes.NewCipher(queryKey)
+ if err != nil {
+ return nil, err
+ }
+
+ gcm, err := cipher.NewGCM(c)
+ if err != nil {
+ return nil, err
+ }
+
+ nonce := make([]byte, gcm.NonceSize())
+ copy(nonce, ct)
+
+ out, err := gcm.Open(nil, nonce, ct[gcm.NonceSize():], nil)
+ if err != nil {
+ client.Errorf("Failed decrypting query result [%s].", err.Error())
+ return nil, utils.ErrDecrypt
+ }
+ return out, nil
+}
diff --git a/core/crypto/client_tca.go b/core/crypto/client_tca.go
new file mode 100644
index 00000000000..dde8179d9f9
--- /dev/null
+++ b/core/crypto/client_tca.go
@@ -0,0 +1,615 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ membersrvc "github.com/hyperledger/fabric/membersrvc/protos"
+
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/hmac"
+
+ "errors"
+ "fmt"
+
+ "google/protobuf"
+ "math/big"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "golang.org/x/net/context"
+)
+
+func (client *clientImpl) initTCertEngine() (err error) {
+ // load TCertOwnerKDFKey
+ if err = client.loadTCertOwnerKDFKey(); err != nil {
+ return
+ }
+
+ // init TCerPool
+ client.Debugf("Using multithreading [%t]", client.conf.IsMultithreadingEnabled())
+ client.Debugf("TCert batch size [%d]", client.conf.getTCertBatchSize())
+
+ if client.conf.IsMultithreadingEnabled() {
+ client.tCertPool = new(tCertPoolMultithreadingImpl)
+ } else {
+ client.tCertPool = new(tCertPoolSingleThreadImpl)
+ }
+
+ if err = client.tCertPool.init(client); err != nil {
+ client.Errorf("Failied inizializing TCertPool: [%s]", err)
+
+ return
+ }
+ if err = client.tCertPool.Start(); err != nil {
+ client.Errorf("Failied starting TCertPool: [%s]", err)
+
+ return
+ }
+ return
+}
+
+func (client *clientImpl) storeTCertOwnerKDFKey() error {
+ if err := client.ks.storeKey(client.conf.getTCertOwnerKDFKeyFilename(), client.tCertOwnerKDFKey); err != nil {
+ client.Errorf("Failed storing TCertOwnerKDFKey [%s].", err.Error())
+
+ return err
+ }
+ return nil
+}
+
+func (client *clientImpl) loadTCertOwnerKDFKey() error {
+ // Load TCertOwnerKDFKey
+ client.Debug("Loading TCertOwnerKDFKey...")
+
+ if !client.ks.isAliasSet(client.conf.getTCertOwnerKDFKeyFilename()) {
+ client.Error("Failed loading TCertOwnerKDFKey. Key is missing.")
+
+ return nil
+ }
+
+ tCertOwnerKDFKey, err := client.ks.loadKey(client.conf.getTCertOwnerKDFKeyFilename())
+ if err != nil {
+ client.Errorf("Failed parsing TCertOwnerKDFKey [%s].", err.Error())
+
+ return err
+ }
+ client.tCertOwnerKDFKey = tCertOwnerKDFKey
+
+ client.Debug("Loading TCertOwnerKDFKey...done!")
+
+ return nil
+}
+
+func (client *clientImpl) getTCertFromExternalDER(der []byte) (tCert, error) {
+ // DER to x509
+ x509Cert, err := primitives.DERToX509Certificate(der)
+ if err != nil {
+ client.Errorf("Failed parsing certificate [% x]: [%s].", der, err)
+
+ return nil, err
+ }
+
+ // Handle Critical Extension TCertEncTCertIndex
+ tCertIndexCT, err := primitives.GetCriticalExtension(x509Cert, primitives.TCertEncTCertIndex)
+ if err != nil {
+ client.Errorf("Failed getting extension TCERT_ENC_TCERTINDEX [% x]: [%s].", der, err)
+
+ return nil, err
+ }
+
+ // Handle Critical Extension TCertEncEnrollmentID TODO validate encEnrollmentID
+ _, err = primitives.GetCriticalExtension(x509Cert, primitives.TCertEncEnrollmentID)
+ if err != nil {
+ client.Errorf("Failed getting extension TCERT_ENC_ENROLLMENT_ID [%s].", err.Error())
+
+ return nil, err
+ }
+
+ // Handle Critical Extension TCertAttributes
+ // for i := 0; i < len(x509Cert.Extensions) - 2; i++ {
+ // attributeExtensionIdentifier := append(utils.TCertEncAttributesBase, i + 9)
+ // _ , err = utils.GetCriticalExtension(x509Cert, attributeExtensionIdentifier)
+ // if err != nil {
+ // client.Errorf("Failed getting extension TCERT_ATTRIBUTE_%s [%s].", i, err.Error())
+ //
+ // return nil, err
+ // }
+ // }
+
+ // Verify certificate against root
+ if _, err := primitives.CheckCertAgainRoot(x509Cert, client.tcaCertPool); err != nil {
+ client.Warningf("Warning verifing certificate [% x]: [%s].", der, err)
+
+ return nil, err
+ }
+
+ // Try to extract the signing key from the TCert by decrypting the TCertIndex
+
+ // 384-bit ExpansionValue = HMAC(Expansion_Key, TCertIndex)
+ // Let TCertIndex = Timestamp, RandValue, 1,2,…
+ // Timestamp assigned, RandValue assigned and counter reinitialized to 1 per batch
+ // Decrypt ct to TCertIndex (TODO: || EnrollPub_Key || EnrollID ?)
+ TCertOwnerEncryptKey := primitives.HMACAESTruncated(client.tCertOwnerKDFKey, []byte{1})
+ ExpansionKey := primitives.HMAC(client.tCertOwnerKDFKey, []byte{2})
+ pt, err := primitives.CBCPKCS7Decrypt(TCertOwnerEncryptKey, tCertIndexCT)
+
+ if err == nil {
+ // Compute ExpansionValue based on TCertIndex
+ TCertIndex := pt
+ // TCertIndex := []byte(strconv.Itoa(i))
+
+ // TODO: verify that TCertIndex has right format.
+
+ client.Debugf("TCertIndex: [% x].", TCertIndex)
+ mac := hmac.New(primitives.NewHash, ExpansionKey)
+ mac.Write(TCertIndex)
+ ExpansionValue := mac.Sum(nil)
+
+ // Derive tpk and tsk accordingly to ExpansionValue from enrollment pk,sk
+ // Computable by TCA / Auditor: TCertPub_Key = EnrollPub_Key + ExpansionValue G
+ // using elliptic curve point addition per NIST FIPS PUB 186-4- specified P-384
+
+ // Compute temporary secret key
+ tempSK := &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: client.enrollPrivKey.Curve,
+ X: new(big.Int),
+ Y: new(big.Int),
+ },
+ D: new(big.Int),
+ }
+
+ var k = new(big.Int).SetBytes(ExpansionValue)
+ var one = new(big.Int).SetInt64(1)
+ n := new(big.Int).Sub(client.enrollPrivKey.Params().N, one)
+ k.Mod(k, n)
+ k.Add(k, one)
+
+ tempSK.D.Add(client.enrollPrivKey.D, k)
+ tempSK.D.Mod(tempSK.D, client.enrollPrivKey.PublicKey.Params().N)
+
+ // Compute temporary public key
+ tempX, tempY := client.enrollPrivKey.PublicKey.ScalarBaseMult(k.Bytes())
+ tempSK.PublicKey.X, tempSK.PublicKey.Y =
+ tempSK.PublicKey.Add(
+ client.enrollPrivKey.PublicKey.X, client.enrollPrivKey.PublicKey.Y,
+ tempX, tempY,
+ )
+
+ // Verify temporary public key is a valid point on the reference curve
+ isOn := tempSK.Curve.IsOnCurve(tempSK.PublicKey.X, tempSK.PublicKey.Y)
+ if !isOn {
+ client.Warning("Failed temporary public key IsOnCurve check. This is an foreign certificate.")
+
+ return &tCertImpl{client, x509Cert, nil, []byte{}}, nil
+ }
+
+ // Check that the derived public key is the same as the one in the certificate
+ certPK := x509Cert.PublicKey.(*ecdsa.PublicKey)
+
+ if certPK.X.Cmp(tempSK.PublicKey.X) != 0 {
+ client.Warning("Derived public key is different on X. This is an foreign certificate.")
+
+ return &tCertImpl{client, x509Cert, nil, []byte{}}, nil
+ }
+
+ if certPK.Y.Cmp(tempSK.PublicKey.Y) != 0 {
+ client.Warning("Derived public key is different on Y. This is an foreign certificate.")
+
+ return &tCertImpl{client, x509Cert, nil, []byte{}}, nil
+ }
+
+ // Verify the signing capability of tempSK
+ err = primitives.VerifySignCapability(tempSK, x509Cert.PublicKey)
+ if err != nil {
+ client.Warning("Failed verifing signing capability [%s]. This is an foreign certificate.", err.Error())
+
+ return &tCertImpl{client, x509Cert, nil, []byte{}}, nil
+ }
+
+ // Marshall certificate and secret key to be stored in the database
+ if err != nil {
+ client.Warningf("Failed marshalling private key [%s]. This is an foreign certificate.", err.Error())
+
+ return &tCertImpl{client, x509Cert, nil, []byte{}}, nil
+ }
+
+ if err = primitives.CheckCertPKAgainstSK(x509Cert, interface{}(tempSK)); err != nil {
+ client.Warningf("Failed checking TCA cert PK against private key [%s]. This is an foreign certificate.", err.Error())
+
+ return &tCertImpl{client, x509Cert, nil, []byte{}}, nil
+ }
+
+ return &tCertImpl{client, x509Cert, tempSK, []byte{}}, nil
+ }
+ client.Warningf("Failed decrypting extension TCERT_ENC_TCERTINDEX [%s]. This is an foreign certificate.", err.Error())
+ return &tCertImpl{client, x509Cert, nil, []byte{}}, nil
+}
+
+func (client *clientImpl) getTCertFromDER(certBlk *TCertDBBlock) (certBlock *TCertBlock, err error) {
+ if client.tCertOwnerKDFKey == nil {
+ return nil, fmt.Errorf("KDF key not initialized yet")
+ }
+
+ TCertOwnerEncryptKey := primitives.HMACAESTruncated(client.tCertOwnerKDFKey, []byte{1})
+ ExpansionKey := primitives.HMAC(client.tCertOwnerKDFKey, []byte{2})
+
+ // DER to x509
+ x509Cert, err := primitives.DERToX509Certificate(certBlk.tCertDER)
+ if err != nil {
+ client.Errorf("Failed parsing certificate [% x]: [%s].", certBlk.tCertDER, err)
+
+ return
+ }
+
+ // Handle Critical Extenstion TCertEncTCertIndex
+ tCertIndexCT, err := primitives.GetCriticalExtension(x509Cert, primitives.TCertEncTCertIndex)
+ if err != nil {
+ client.Errorf("Failed getting extension TCERT_ENC_TCERTINDEX [%v].", err.Error())
+
+ return
+ }
+
+ // Verify certificate against root
+ if _, err = primitives.CheckCertAgainRoot(x509Cert, client.tcaCertPool); err != nil {
+ client.Warningf("Warning verifing certificate [%s].", err.Error())
+
+ return
+ }
+
+ // Verify public key
+
+ // 384-bit ExpansionValue = HMAC(Expansion_Key, TCertIndex)
+ // Let TCertIndex = Timestamp, RandValue, 1,2,…
+ // Timestamp assigned, RandValue assigned and counter reinitialized to 1 per batch
+
+ // Decrypt ct to TCertIndex (TODO: || EnrollPub_Key || EnrollID ?)
+ pt, err := primitives.CBCPKCS7Decrypt(TCertOwnerEncryptKey, tCertIndexCT)
+ if err != nil {
+ client.Errorf("Failed decrypting extension TCERT_ENC_TCERTINDEX [%s].", err.Error())
+
+ return
+ }
+
+ // Compute ExpansionValue based on TCertIndex
+ TCertIndex := pt
+ // TCertIndex := []byte(strconv.Itoa(i))
+
+ client.Debugf("TCertIndex: [% x].", TCertIndex)
+ mac := hmac.New(primitives.NewHash, ExpansionKey)
+ mac.Write(TCertIndex)
+ ExpansionValue := mac.Sum(nil)
+
+ // Derive tpk and tsk accordingly to ExpansionValue from enrollment pk,sk
+ // Computable by TCA / Auditor: TCertPub_Key = EnrollPub_Key + ExpansionValue G
+ // using elliptic curve point addition per NIST FIPS PUB 186-4- specified P-384
+
+ // Compute temporary secret key
+ tempSK := &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: client.enrollPrivKey.Curve,
+ X: new(big.Int),
+ Y: new(big.Int),
+ },
+ D: new(big.Int),
+ }
+
+ var k = new(big.Int).SetBytes(ExpansionValue)
+ var one = new(big.Int).SetInt64(1)
+ n := new(big.Int).Sub(client.enrollPrivKey.Params().N, one)
+ k.Mod(k, n)
+ k.Add(k, one)
+
+ tempSK.D.Add(client.enrollPrivKey.D, k)
+ tempSK.D.Mod(tempSK.D, client.enrollPrivKey.PublicKey.Params().N)
+
+ // Compute temporary public key
+ tempX, tempY := client.enrollPrivKey.PublicKey.ScalarBaseMult(k.Bytes())
+ tempSK.PublicKey.X, tempSK.PublicKey.Y =
+ tempSK.PublicKey.Add(
+ client.enrollPrivKey.PublicKey.X, client.enrollPrivKey.PublicKey.Y,
+ tempX, tempY,
+ )
+
+ // Verify temporary public key is a valid point on the reference curve
+ isOn := tempSK.Curve.IsOnCurve(tempSK.PublicKey.X, tempSK.PublicKey.Y)
+ if !isOn {
+ client.Error("Failed temporary public key IsOnCurve check.")
+
+ return nil, fmt.Errorf("Failed temporary public key IsOnCurve check.")
+ }
+
+ // Check that the derived public key is the same as the one in the certificate
+ certPK := x509Cert.PublicKey.(*ecdsa.PublicKey)
+
+ if certPK.X.Cmp(tempSK.PublicKey.X) != 0 {
+ client.Error("Derived public key is different on X")
+
+ return nil, fmt.Errorf("Derived public key is different on X")
+ }
+
+ if certPK.Y.Cmp(tempSK.PublicKey.Y) != 0 {
+ client.Error("Derived public key is different on Y")
+
+ return nil, fmt.Errorf("Derived public key is different on Y")
+ }
+
+ // Verify the signing capability of tempSK
+ err = primitives.VerifySignCapability(tempSK, x509Cert.PublicKey)
+ if err != nil {
+ client.Errorf("Failed verifing signing capability [%s].", err.Error())
+
+ return
+ }
+
+ // Marshall certificate and secret key to be stored in the database
+ if err != nil {
+ client.Errorf("Failed marshalling private key [%s].", err.Error())
+
+ return
+ }
+
+ if err = primitives.CheckCertPKAgainstSK(x509Cert, interface{}(tempSK)); err != nil {
+ client.Errorf("Failed checking TCA cert PK against private key [%s].", err.Error())
+
+ return
+ }
+
+ certBlock = &TCertBlock{&tCertImpl{client, x509Cert, tempSK, certBlk.preK0}, certBlk.attributesHash}
+
+ return
+}
+
+func (client *clientImpl) getTCertsFromTCA(attrhash string, attributes []string, num int) error {
+ client.Debugf("Get [%d] certificates from the TCA...", num)
+
+ // Contact the TCA
+ TCertOwnerKDFKey, certDERs, err := client.callTCACreateCertificateSet(num, attributes)
+ if err != nil {
+ client.Errorf("Failed contacting TCA [%s].", err.Error())
+
+ return err
+ }
+
+ // client.debug("TCertOwnerKDFKey [%s].", utils.EncodeBase64(TCertOwnerKDFKey))
+
+ // Store TCertOwnerKDFKey and checks that every time it is always the same key
+ if client.tCertOwnerKDFKey != nil {
+ // Check that the keys are the same
+ equal := bytes.Equal(client.tCertOwnerKDFKey, TCertOwnerKDFKey)
+ if !equal {
+ return errors.New("Failed reciving kdf key from TCA. The keys are different.")
+ }
+ } else {
+ client.tCertOwnerKDFKey = TCertOwnerKDFKey
+
+ // TODO: handle this situation more carefully
+ if err := client.storeTCertOwnerKDFKey(); err != nil {
+ client.Errorf("Failed storing TCertOwnerKDFKey [%s].", err.Error())
+
+ return err
+ }
+ }
+
+ // Validate the Certificates obtained
+
+ TCertOwnerEncryptKey := primitives.HMACAESTruncated(client.tCertOwnerKDFKey, []byte{1})
+ ExpansionKey := primitives.HMAC(client.tCertOwnerKDFKey, []byte{2})
+
+ j := 0
+ for i := 0; i < num; i++ {
+ // DER to x509
+ x509Cert, err := primitives.DERToX509Certificate(certDERs[i].Cert)
+ prek0 := certDERs[i].Prek0
+ if err != nil {
+ client.Errorf("Failed parsing certificate [% x]: [%s].", certDERs[i].Cert, err)
+
+ continue
+ }
+
+ // Handle Critical Extenstion TCertEncTCertIndex
+ tCertIndexCT, err := primitives.GetCriticalExtension(x509Cert, primitives.TCertEncTCertIndex)
+ if err != nil {
+ client.Errorf("Failed getting extension TCERT_ENC_TCERTINDEX [% x]: [%s].", primitives.TCertEncTCertIndex, err)
+
+ continue
+ }
+
+ // Verify certificate against root
+ if _, err := primitives.CheckCertAgainRoot(x509Cert, client.tcaCertPool); err != nil {
+ client.Warningf("Warning verifing certificate [%s].", err.Error())
+
+ continue
+ }
+
+ // Verify public key
+
+ // 384-bit ExpansionValue = HMAC(Expansion_Key, TCertIndex)
+ // Let TCertIndex = Timestamp, RandValue, 1,2,…
+ // Timestamp assigned, RandValue assigned and counter reinitialized to 1 per batch
+
+ // Decrypt ct to TCertIndex (TODO: || EnrollPub_Key || EnrollID ?)
+ pt, err := primitives.CBCPKCS7Decrypt(TCertOwnerEncryptKey, tCertIndexCT)
+ if err != nil {
+ client.Errorf("Failed decrypting extension TCERT_ENC_TCERTINDEX [%s].", err.Error())
+
+ continue
+ }
+
+ // Compute ExpansionValue based on TCertIndex
+ TCertIndex := pt
+ // TCertIndex := []byte(strconv.Itoa(i))
+
+ client.Debugf("TCertIndex: [% x].", TCertIndex)
+ mac := hmac.New(primitives.NewHash, ExpansionKey)
+ mac.Write(TCertIndex)
+ ExpansionValue := mac.Sum(nil)
+
+ // Derive tpk and tsk accordingly to ExpansionValue from enrollment pk,sk
+ // Computable by TCA / Auditor: TCertPub_Key = EnrollPub_Key + ExpansionValue G
+ // using elliptic curve point addition per NIST FIPS PUB 186-4- specified P-384
+
+ // Compute temporary secret key
+ tempSK := &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: client.enrollPrivKey.Curve,
+ X: new(big.Int),
+ Y: new(big.Int),
+ },
+ D: new(big.Int),
+ }
+
+ var k = new(big.Int).SetBytes(ExpansionValue)
+ var one = new(big.Int).SetInt64(1)
+ n := new(big.Int).Sub(client.enrollPrivKey.Params().N, one)
+ k.Mod(k, n)
+ k.Add(k, one)
+
+ tempSK.D.Add(client.enrollPrivKey.D, k)
+ tempSK.D.Mod(tempSK.D, client.enrollPrivKey.PublicKey.Params().N)
+
+ // Compute temporary public key
+ tempX, tempY := client.enrollPrivKey.PublicKey.ScalarBaseMult(k.Bytes())
+ tempSK.PublicKey.X, tempSK.PublicKey.Y =
+ tempSK.PublicKey.Add(
+ client.enrollPrivKey.PublicKey.X, client.enrollPrivKey.PublicKey.Y,
+ tempX, tempY,
+ )
+
+ // Verify temporary public key is a valid point on the reference curve
+ isOn := tempSK.Curve.IsOnCurve(tempSK.PublicKey.X, tempSK.PublicKey.Y)
+ if !isOn {
+ client.Error("Failed temporary public key IsOnCurve check.")
+
+ continue
+ }
+
+ // Check that the derived public key is the same as the one in the certificate
+ certPK := x509Cert.PublicKey.(*ecdsa.PublicKey)
+
+ if certPK.X.Cmp(tempSK.PublicKey.X) != 0 {
+ client.Error("Derived public key is different on X")
+
+ continue
+ }
+
+ if certPK.Y.Cmp(tempSK.PublicKey.Y) != 0 {
+ client.Error("Derived public key is different on Y")
+
+ continue
+ }
+
+ // Verify the signing capability of tempSK
+ err = primitives.VerifySignCapability(tempSK, x509Cert.PublicKey)
+ if err != nil {
+ client.Errorf("Failed verifing signing capability [%s].", err.Error())
+
+ continue
+ }
+
+ // Marshall certificate and secret key to be stored in the database
+ if err != nil {
+ client.Errorf("Failed marshalling private key [%s].", err.Error())
+
+ continue
+ }
+
+ if err := primitives.CheckCertPKAgainstSK(x509Cert, interface{}(tempSK)); err != nil {
+ client.Errorf("Failed checking TCA cert PK against private key [%s].", err.Error())
+
+ continue
+ }
+
+ client.Debugf("Sub index [%d]", j)
+ j++
+ client.Debugf("Certificate [%d] validated.", i)
+
+ prek0Cp := make([]byte, len(prek0))
+ copy(prek0Cp, prek0)
+
+ tcertBlk := new(TCertBlock)
+
+ tcertBlk.tCert = &tCertImpl{client, x509Cert, tempSK, prek0Cp}
+ tcertBlk.attributesHash = attrhash
+
+ client.tCertPool.AddTCert(tcertBlk)
+ }
+
+ if j == 0 {
+ client.Error("No valid TCert was sent")
+
+ return errors.New("No valid TCert was sent.")
+ }
+
+ return nil
+}
+
+func (client *clientImpl) callTCACreateCertificateSet(num int, attributes []string) ([]byte, []*membersrvc.TCert, error) {
+ // Get a TCA Client
+ sock, tcaP, err := client.getTCAClient()
+ defer sock.Close()
+
+ var attributesList []*membersrvc.TCertAttribute
+
+ for _, k := range attributes {
+ tcertAttr := new(membersrvc.TCertAttribute)
+ tcertAttr.AttributeName = k
+ attributesList = append(attributesList, tcertAttr)
+ }
+
+ // Execute the protocol
+ now := time.Now()
+ timestamp := google_protobuf.Timestamp{Seconds: int64(now.Second()), Nanos: int32(now.Nanosecond())}
+ req := &membersrvc.TCertCreateSetReq{
+ Ts: ×tamp,
+ Id: &membersrvc.Identity{Id: client.enrollID},
+ Num: uint32(num),
+ Attributes: attributesList,
+ Sig: nil,
+ }
+
+ rawReq, err := proto.Marshal(req)
+ if err != nil {
+ client.Errorf("Failed marshaling request [%s].", err.Error())
+ return nil, nil, err
+ }
+
+ // 2. Sign rawReq
+ r, s, err := client.ecdsaSignWithEnrollmentKey(rawReq)
+ if err != nil {
+ client.Errorf("Failed creating signature for [% x]: [%s].", rawReq, err.Error())
+ return nil, nil, err
+ }
+
+ R, _ := r.MarshalText()
+ S, _ := s.MarshalText()
+
+ // 3. Append the signature
+ req.Sig = &membersrvc.Signature{Type: membersrvc.CryptoType_ECDSA, R: R, S: S}
+
+ // 4. Send request
+ certSet, err := tcaP.CreateCertificateSet(context.Background(), req)
+ if err != nil {
+ client.Errorf("Failed requesting tca create certificate set [%s].", err.Error())
+
+ return nil, nil, err
+ }
+
+ return certSet.Certs.Key, certSet.Certs.Certs, nil
+}
diff --git a/core/crypto/client_tcert.go b/core/crypto/client_tcert.go
new file mode 100644
index 00000000000..1c99e63cef7
--- /dev/null
+++ b/core/crypto/client_tcert.go
@@ -0,0 +1,88 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "crypto/x509"
+
+ "github.com/hyperledger/fabric/core/crypto/attributes"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+)
+
+type tCert interface {
+ //GetCertificate returns the x509 certificate of the TCert.
+ GetCertificate() *x509.Certificate
+
+ //GetPreK0 returns the PreK0 of the TCert. This key is used to derivate attributes keys.
+ GetPreK0() []byte
+
+ //Sign signs a msg with the TCert secret key an returns the signature.
+ Sign(msg []byte) ([]byte, error)
+
+ //Verify verifies signature and message using the TCert public key.
+ Verify(signature, msg []byte) error
+
+ //GetKForAttribute derives the key for a specific attribute name.
+ GetKForAttribute(attributeName string) ([]byte, error)
+}
+
+type tCertImpl struct {
+ client *clientImpl
+ cert *x509.Certificate
+ sk interface{}
+ preK0 []byte
+}
+
+//GetCertificate returns the x509 certificate of the TCert.
+func (tCert *tCertImpl) GetCertificate() *x509.Certificate {
+ return tCert.cert
+}
+
+//GetPreK0 returns the PreK0 of the TCert. This key is used to derivate attributes keys.
+func (tCert *tCertImpl) GetPreK0() []byte {
+ return tCert.preK0
+}
+
+//Sign signs a msg with the TCert secret key an returns the signature.
+func (tCert *tCertImpl) Sign(msg []byte) ([]byte, error) {
+ if tCert.sk == nil {
+ return nil, utils.ErrNilArgument
+ }
+
+ return tCert.client.sign(tCert.sk, msg)
+}
+
+//Verify verifies signature and message using the TCert public key.
+func (tCert *tCertImpl) Verify(signature, msg []byte) (err error) {
+ ok, err := tCert.client.verify(tCert.cert.PublicKey, msg, signature)
+ if err != nil {
+ return
+ }
+ if !ok {
+ return utils.ErrInvalidSignature
+ }
+ return
+}
+
+//GetKForAttribute derives the key for a specific attribute name.
+func (tCert *tCertImpl) GetKForAttribute(attributeName string) ([]byte, error) {
+ if tCert.preK0 == nil {
+ return nil, utils.ErrNilArgument
+ }
+
+ return attributes.GetKForAttribute(attributeName, tCert.preK0, tCert.GetCertificate())
+}
diff --git a/core/crypto/client_tcert_handler.go b/core/crypto/client_tcert_handler.go
new file mode 100644
index 00000000000..ca8b0b388df
--- /dev/null
+++ b/core/crypto/client_tcert_handler.go
@@ -0,0 +1,111 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+ obc "github.com/hyperledger/fabric/protos"
+)
+
+type tCertHandlerImpl struct {
+ client *clientImpl
+
+ tCert tCert
+}
+
+type tCertTransactionHandlerImpl struct {
+ tCertHandler *tCertHandlerImpl
+
+ nonce []byte
+ binding []byte
+}
+
+func (handler *tCertHandlerImpl) init(client *clientImpl, tCert tCert) error {
+ handler.client = client
+ handler.tCert = tCert
+
+ return nil
+}
+
+// GetCertificate returns the TCert DER
+func (handler *tCertHandlerImpl) GetCertificate() []byte {
+ return utils.Clone(handler.tCert.GetCertificate().Raw)
+}
+
+// Sign signs msg using the signing key corresponding to this TCert
+func (handler *tCertHandlerImpl) Sign(msg []byte) ([]byte, error) {
+ return handler.tCert.Sign(msg)
+}
+
+// Verify verifies msg using the verifying key corresponding to this TCert
+func (handler *tCertHandlerImpl) Verify(signature []byte, msg []byte) error {
+ return handler.tCert.Verify(signature, msg)
+}
+
+// GetTransactionHandler returns the transaction handler relative to this certificate
+func (handler *tCertHandlerImpl) GetTransactionHandler() (TransactionHandler, error) {
+ txHandler := &tCertTransactionHandlerImpl{}
+ err := txHandler.init(handler)
+ if err != nil {
+ handler.client.Errorf("Failed initiliazing transaction handler [%s]", err)
+
+ return nil, err
+ }
+
+ return txHandler, nil
+}
+
+func (handler *tCertTransactionHandlerImpl) init(tCertHandler *tCertHandlerImpl) error {
+ nonce, err := tCertHandler.client.createTransactionNonce()
+ if err != nil {
+ tCertHandler.client.Errorf("Failed initiliazing transaction handler [%s]", err)
+
+ return err
+ }
+
+ handler.tCertHandler = tCertHandler
+ handler.nonce = nonce
+ handler.binding = primitives.Hash(append(handler.tCertHandler.tCert.GetCertificate().Raw, nonce...))
+
+ return nil
+}
+
+// GetCertificateHandler returns the certificate handler relative to the certificate mapped to this transaction
+func (handler *tCertTransactionHandlerImpl) GetCertificateHandler() (CertificateHandler, error) {
+ return handler.tCertHandler, nil
+}
+
+// GetBinding returns an Binding to the underlying transaction layer
+func (handler *tCertTransactionHandlerImpl) GetBinding() ([]byte, error) {
+ return utils.Clone(handler.binding), nil
+}
+
+// NewChaincodeDeployTransaction is used to deploy chaincode.
+func (handler *tCertTransactionHandlerImpl) NewChaincodeDeployTransaction(chaincodeDeploymentSpec *obc.ChaincodeDeploymentSpec, uuid string, attributeNames ...string) (*obc.Transaction, error) {
+ return handler.tCertHandler.client.newChaincodeDeployUsingTCert(chaincodeDeploymentSpec, uuid, attributeNames, handler.tCertHandler.tCert, handler.nonce)
+}
+
+// NewChaincodeExecute is used to execute chaincode's functions.
+func (handler *tCertTransactionHandlerImpl) NewChaincodeExecute(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, attributeNames ...string) (*obc.Transaction, error) {
+ return handler.tCertHandler.client.newChaincodeExecuteUsingTCert(chaincodeInvocation, uuid, attributeNames, handler.tCertHandler.tCert, handler.nonce)
+}
+
+// NewChaincodeQuery is used to query chaincode's functions.
+func (handler *tCertTransactionHandlerImpl) NewChaincodeQuery(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, attributeNames ...string) (*obc.Transaction, error) {
+ return handler.tCertHandler.client.newChaincodeQueryUsingTCert(chaincodeInvocation, uuid, attributeNames, handler.tCertHandler.tCert, handler.nonce)
+}
diff --git a/core/crypto/client_tcert_pool.go b/core/crypto/client_tcert_pool.go
new file mode 100644
index 00000000000..775341b6b3c
--- /dev/null
+++ b/core/crypto/client_tcert_pool.go
@@ -0,0 +1,29 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+type tCertPool interface {
+ init(client *clientImpl) error
+
+ Start() error
+
+ Stop() error
+
+ GetNextTCerts(nCerts int, attributes ...string) ([]*TCertBlock, error)
+
+ AddTCert(tCertBlock *TCertBlock) (err error)
+}
diff --git a/core/crypto/client_tcert_pool_mt.go b/core/crypto/client_tcert_pool_mt.go
new file mode 100644
index 00000000000..6fd52d26044
--- /dev/null
+++ b/core/crypto/client_tcert_pool_mt.go
@@ -0,0 +1,335 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "errors"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+)
+
+// tCertPoolEntry buffers ready-to-use TCerts generated for one specific
+// attribute set; a background filler goroutine keeps the buffer topped up.
+type tCertPoolEntry struct {
+	// attribute names whose hash identifies this entry
+	attributes []string
+	// buffered channel acting as the pool of available TCerts
+	tCertChannel chan *TCertBlock
+	// consumer-to-filler signal: one TCert was taken, consider refilling
+	tCertChannelFeedback chan struct{}
+	// Stop() signals the filler goroutine through this channel
+	done chan struct{}
+	// owning client: provides config, keystore and TCA access
+	client *clientImpl
+	// last TCert block received by GetNextTCert
+	tCertBlock *TCertBlock
+}
+
+// newTCertPoolEntry creates a pool entry for the given attribute set.
+// The TCert and feedback channels are buffered at twice the configured
+// batch size; done is buffered so Stop never blocks on a dead filler.
+func newTCertPoolEntry(client *clientImpl, attributes []string) *tCertPoolEntry {
+	bufferSize := client.conf.getTCertBatchSize() * 2
+	return &tCertPoolEntry{
+		attributes:           attributes,
+		tCertChannel:         make(chan *TCertBlock, bufferSize),
+		tCertChannelFeedback: make(chan struct{}, bufferSize),
+		done:                 make(chan struct{}, 1),
+		client:               client,
+	}
+}
+
+//Start starts the pool entry filler loop.
+func (tCertPoolEntry *tCertPoolEntry) Start() error {
+	// Spawn the background goroutine that keeps the channel topped up.
+	go tCertPoolEntry.filler()
+	return nil
+}
+
+//Stop stops the pool entry filler loop and persists unused TCerts.
+func (tCertPoolEntry *tCertPoolEntry) Stop() (err error) {
+	// Signal the filler goroutine to terminate.
+	tCertPoolEntry.done <- struct{}{}
+
+	// Drain the channel and persist every TCert that was never handed out.
+	tCertPoolEntry.client.Debug("Store unused TCerts...")
+	unused := []*TCertBlock{}
+	for len(tCertPoolEntry.tCertChannel) > 0 {
+		unused = append(unused, <-tCertPoolEntry.tCertChannel)
+	}
+
+	tCertPoolEntry.client.Debugf("Found %d unused TCerts...", len(unused))
+
+	tCertPoolEntry.client.ks.storeUnusedTCerts(unused)
+
+	tCertPoolEntry.client.Debug("Store unused TCerts...done!")
+
+	return
+}
+
+//AddTCert add a tcert to the poolEntry.
+// Sends on the buffered channel; blocks if the pool is full (capacity is
+// twice the configured batch size). Always returns nil.
+func (tCertPoolEntry *tCertPoolEntry) AddTCert(tCertBlock *TCertBlock) (err error) {
+	tCertPoolEntry.tCertChannel <- tCertBlock
+	return
+}
+
+//GetNextTCert gets the next tcert of the pool.
+func (tCertPoolEntry *tCertPoolEntry) GetNextTCert(attributes ...string) (tCertBlock *TCertBlock, err error) {
+ for i := 0; i < 3; i++ {
+ tCertPoolEntry.client.Debugf("Getting next TCert... %d out of 3", i)
+ select {
+ case tCertPoolEntry.tCertBlock = <-tCertPoolEntry.tCertChannel:
+ break
+ case <-time.After(30 * time.Second):
+ tCertPoolEntry.client.Error("Failed getting a new TCert. Buffer is empty!")
+ }
+ if tCertPoolEntry.tCertBlock != nil {
+ // Send feedback to the filler
+ tCertPoolEntry.client.Debug("Send feedback")
+ tCertPoolEntry.tCertChannelFeedback <- struct{}{}
+ break
+ }
+ }
+
+ if tCertPoolEntry.tCertBlock == nil {
+ // TODO: change error here
+ return nil, errors.New("Failed getting a new TCert. Buffer is empty!")
+ }
+
+ tCertBlock = tCertPoolEntry.tCertBlock
+ tCertPoolEntry.client.Debugf("Cert [% x].", tCertBlock.tCert.GetCertificate().Raw)
+
+ // Store the TCert permanently
+ tCertPoolEntry.client.ks.storeUsedTCert(tCertBlock)
+
+ tCertPoolEntry.client.Debug("Getting next TCert...done!")
+
+ return
+}
+
+// filler keeps tCertChannel topped up. It first drains TCerts cached in the
+// keystore that match this entry's attributes hash, then loops refilling the
+// channel from the TCA (triggered by consumer feedback or a 1s tick) until
+// Stop() signals done.
+// Fixes: the 1-second ticker was never stopped, leaking it when the filler
+// exits; log typo "paring" corrected to "parsing".
+func (tCertPoolEntry *tCertPoolEntry) filler() {
+	// Load unused TCerts
+	stop := false
+	full := false
+	tCertPoolEntry.client.Debug("Filler()")
+
+	attributeHash := calculateAttributesHash(tCertPoolEntry.attributes)
+	for {
+		// Check if Stop was called
+		select {
+		case <-tCertPoolEntry.done:
+			tCertPoolEntry.client.Debug("Force stop!")
+			stop = true
+		default:
+		}
+		if stop {
+			break
+		}
+
+		tCertDBBlocks, err := tCertPoolEntry.client.ks.loadUnusedTCerts()
+
+		if err != nil {
+			tCertPoolEntry.client.Errorf("Failed loading TCert: [%s]", err)
+			break
+		}
+		if tCertDBBlocks == nil {
+			tCertPoolEntry.client.Debug("No more TCerts in cache!")
+			break
+		}
+
+		// Take the last cached block whose attributes hash matches this entry.
+		var tCert *TCertBlock
+		for _, tCertDBBlock := range tCertDBBlocks {
+			if strings.Compare(attributeHash, tCertDBBlock.attributesHash) == 0 {
+				tCertBlock, err := tCertPoolEntry.client.getTCertFromDER(tCertDBBlock)
+				if err != nil {
+					tCertPoolEntry.client.Errorf("Failed parsing TCert [% x]: [%s]", tCertDBBlock.tCertDER, err)
+					continue
+				}
+				tCert = tCertBlock
+			}
+		}
+
+		if tCert != nil {
+			// Try to send the tCert to the channel if not full
+			select {
+			case tCertPoolEntry.tCertChannel <- tCert:
+				tCertPoolEntry.client.Debug("TCert send to the channel!")
+			default:
+				tCertPoolEntry.client.Debug("Channell Full!")
+				full = true
+			}
+			if full {
+				break
+			}
+		} else {
+			tCertPoolEntry.client.Debug("No more TCerts in cache!")
+			break
+		}
+	}
+
+	tCertPoolEntry.client.Debug("Load unused TCerts...done!")
+
+	if !stop {
+		ticker := time.NewTicker(1 * time.Second)
+		// Fix: release the ticker when the filler terminates (the original
+		// leaked it).
+		defer ticker.Stop()
+		for {
+			select {
+			case <-tCertPoolEntry.done:
+				stop = true
+				tCertPoolEntry.client.Debug("Done signal.")
+			case <-tCertPoolEntry.tCertChannelFeedback:
+				tCertPoolEntry.client.Debug("Feedback received. Time to check for tcerts")
+			case <-ticker.C:
+				tCertPoolEntry.client.Debug("Time elapsed. Time to check for tcerts")
+			}
+
+			if stop {
+				tCertPoolEntry.client.Debug("Quitting filler...")
+				break
+			}
+
+			if len(tCertPoolEntry.tCertChannel) < tCertPoolEntry.client.conf.getTCertBatchSize() {
+				tCertPoolEntry.client.Debugf("Refill TCert Pool. Current size [%d].",
+					len(tCertPoolEntry.tCertChannel),
+				)
+
+				// Request the missing amount; if the channel is completely
+				// empty, request only a tenth of the capacity (min 1) so the
+				// first consumer is unblocked quickly.
+				var numTCerts = cap(tCertPoolEntry.tCertChannel) - len(tCertPoolEntry.tCertChannel)
+				if len(tCertPoolEntry.tCertChannel) == 0 {
+					numTCerts = cap(tCertPoolEntry.tCertChannel) / 10
+					if numTCerts < 1 {
+						numTCerts = 1
+					}
+				}
+
+				tCertPoolEntry.client.Infof("Refilling [%d] TCerts.", numTCerts)
+
+				err := tCertPoolEntry.client.getTCertsFromTCA(calculateAttributesHash(tCertPoolEntry.attributes), tCertPoolEntry.attributes, numTCerts)
+				if err != nil {
+					tCertPoolEntry.client.Errorf("Failed getting TCerts from the TCA: [%s]", err)
+					break
+				}
+			}
+		}
+	}
+
+	tCertPoolEntry.client.Debug("TCert filler stopped.")
+}
+
+// The Multi-threaded tCertPool is currently not used.
+// It plays only a role in testing.
+type tCertPoolMultithreadingImpl struct {
+	// owning client
+	client *clientImpl
+	// one pool entry (each with its own filler goroutine) per attributes hash
+	poolEntries map[string]*tCertPoolEntry
+	// guards poolEntries; see lockEntries/releaseEntries
+	entriesMutex *sync.Mutex
+}
+
+//Start starts the pool processing.
+// Creates (and thereby starts) the default pool entry without attributes.
+// Fix: getPoolEntry already calls Start() on a newly created entry; the
+// original then called poolEntry.Start() a second time, spawning a duplicate
+// filler goroutine for the default entry.
+func (tCertPool *tCertPoolMultithreadingImpl) Start() (err error) {
+	var attributes []string
+	_, err = tCertPool.getPoolEntry(attributes)
+	return err
+}
+
+// lockEntries serializes access to the poolEntries map.
+func (tCertPool *tCertPoolMultithreadingImpl) lockEntries() {
+	tCertPool.entriesMutex.Lock()
+}
+
+// releaseEntries unlocks the poolEntries map and yields the processor so
+// goroutines blocked on the lock can make progress.
+func (tCertPool *tCertPoolMultithreadingImpl) releaseEntries() {
+	tCertPool.entriesMutex.Unlock()
+	runtime.Gosched()
+}
+
+//Stop stops the pool, stopping every entry's filler while holding the entries lock.
+func (tCertPool *tCertPoolMultithreadingImpl) Stop() error {
+	tCertPool.lockEntries()
+	defer tCertPool.releaseEntries()
+	for _, entry := range tCertPool.poolEntries {
+		if err := entry.Stop(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// getPoolEntryFromHash returns the tCertPoolEntry registered under the given
+// attributes hash, or nil if no entry exists for it.
+func (tCertPool *tCertPoolMultithreadingImpl) getPoolEntryFromHash(attributeHash string) *tCertPoolEntry {
+	tCertPool.lockEntries()
+	defer tCertPool.releaseEntries()
+	return tCertPool.poolEntries[attributeHash]
+}
+
+//Returns a tCertPoolEntry for the attributes "attributes"; if the entry
+//doesn't exist, a new one is created, registered and started.
+// Fix: the first trace line passed printf-style arguments to Debug; it must
+// use Debugf for the format directives to be interpreted.
+func (tCertPool *tCertPoolMultithreadingImpl) getPoolEntry(attributes []string) (*tCertPoolEntry, error) {
+	tCertPool.client.Debugf("Getting pool entry %v \n", attributes)
+	attributeHash := calculateAttributesHash(attributes)
+	tCertPool.lockEntries()
+	defer tCertPool.releaseEntries()
+	poolEntry := tCertPool.poolEntries[attributeHash]
+	if poolEntry == nil {
+		tCertPool.client.Debugf("New pool entry %v \n", attributes)
+
+		poolEntry = newTCertPoolEntry(tCertPool.client, attributes)
+		tCertPool.poolEntries[attributeHash] = poolEntry
+		if err := poolEntry.Start(); err != nil {
+			return nil, err
+		}
+		tCertPool.client.Debugf("Pool entry started %v \n", attributes)
+
+	}
+	return poolEntry, nil
+}
+
+//GetNextTCerts returns nCerts TCerts valid for the passed attributes. If no TCert is available the TCA is invoked to generate more.
+func (tCertPool *tCertPoolMultithreadingImpl) GetNextTCerts(nCerts int, attributes ...string) ([]*TCertBlock, error) {
+	result := make([]*TCertBlock, 0, nCerts)
+	for len(result) < nCerts {
+		block, err := tCertPool.getNextTCert(attributes...)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, block)
+	}
+	return result, nil
+}
+
+// getNextTCert fetches one TCert from the pool entry owning the given attributes.
+func (tCertPool *tCertPoolMultithreadingImpl) getNextTCert(attributes ...string) (*TCertBlock, error) {
+	entry, err := tCertPool.getPoolEntry(attributes)
+	if err != nil {
+		return nil, err
+	}
+	tCertPool.client.Debugf("Requesting tcert to the pool entry. %v", calculateAttributesHash(attributes))
+	return entry.GetNextTCert(attributes...)
+}
+
+//AddTCert adds a TCert into the pool; invoked by the client after TCA is called.
+// Fix: propagate the pool entry's AddTCert error instead of silently
+// discarding it.
+func (tCertPool *tCertPoolMultithreadingImpl) AddTCert(tCertBlock *TCertBlock) (err error) {
+	poolEntry := tCertPool.getPoolEntryFromHash(tCertBlock.attributesHash)
+	if poolEntry == nil {
+		return errors.New("No pool entry found for that attributes.")
+	}
+	tCertPool.client.Debugf("Adding %v \n.", tCertBlock.attributesHash)
+	return poolEntry.AddTCert(tCertBlock)
+}
+
+// init wires the pool to its owning client and allocates the entry map and lock.
+func (tCertPool *tCertPoolMultithreadingImpl) init(client *clientImpl) error {
+	tCertPool.client = client
+	tCertPool.poolEntries = map[string]*tCertPoolEntry{}
+	tCertPool.entriesMutex = new(sync.Mutex)
+	return nil
+}
diff --git a/core/crypto/client_tcert_pool_st.go b/core/crypto/client_tcert_pool_st.go
new file mode 100644
index 00000000000..3edd59dd69f
--- /dev/null
+++ b/core/crypto/client_tcert_pool_st.go
@@ -0,0 +1,192 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "encoding/hex"
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+)
+
+//TCertBlock is an object that include the generated TCert and the attributes used to generate it.
+type TCertBlock struct {
+	// the transaction certificate itself
+	tCert tCert
+	// hex hash of the attribute set the TCert was generated for,
+	// as produced by calculateAttributesHash
+	attributesHash string
+}
+
+//TCertDBBlock is an object used to store the TCert in the database. A raw field is used to represent the TCert and the preK0, a string field is use to the attributesHash.
+type TCertDBBlock struct {
+	// DER-encoded certificate bytes
+	tCertDER []byte
+	// hex hash of the attribute set (see calculateAttributesHash)
+	attributesHash string
+	// preK0 material persisted alongside the certificate
+	preK0 []byte
+}
+
+// tCertPoolSingleThreadImpl groups TCerts by attributes hash: for each hash,
+// tCerts holds a slice used as a stack and length tracks how many slots of
+// that slice are occupied.
+type tCertPoolSingleThreadImpl struct {
+	// owning client: provides config, keystore and TCA access
+	client *clientImpl
+
+	// NOTE(review): this flag is never read or written in this file — confirm
+	// it is unused before removing
+	empty bool
+
+	// per-attributes-hash count of valid entries in tCerts
+	length map[string]int
+
+	// per-attributes-hash storage of pooled TCerts
+	tCerts map[string][]*TCertBlock
+
+	// guards length and tCerts; held across the TCA reload in getNextTCert,
+	// so AddTCert must not acquire it (non-reentrant)
+	m sync.Mutex
+}
+
+//Start starts the pool processing.
+// Loads the TCerts cached in the keystore back into the in-memory pool.
+// Note: AddTCert is invoked below while m is already held, which is why
+// AddTCert itself does not lock.
+func (tCertPool *tCertPoolSingleThreadImpl) Start() (err error) {
+	tCertPool.m.Lock()
+	defer tCertPool.m.Unlock()
+
+	tCertPool.client.Debug("Starting TCert Pool...")
+
+	// Load unused TCerts if any
+	tCertDBBlocks, err := tCertPool.client.ks.loadUnusedTCerts()
+	if err != nil {
+		tCertPool.client.Errorf("Failed loading TCerts from cache: [%s]", err)
+
+		return
+	}
+
+	if len(tCertDBBlocks) > 0 {
+
+		tCertPool.client.Debug("TCerts in cache found! Loading them...")
+
+		for _, tCertDBBlock := range tCertDBBlocks {
+			tCertBlock, err := tCertPool.client.getTCertFromDER(tCertDBBlock)
+			if err != nil {
+				// Unparsable cached certs are skipped, not fatal.
+				tCertPool.client.Errorf("Failed paring TCert [% x]: [%s]", tCertDBBlock.tCertDER, err)
+
+				continue
+			}
+			tCertPool.AddTCert(tCertBlock)
+		}
+	} //END-IF
+
+	return
+}
+
+//Stop stops the pool, persisting the still-unused TCerts of every attribute group.
+func (tCertPool *tCertPoolSingleThreadImpl) Stop() (err error) {
+	tCertPool.m.Lock()
+	defer tCertPool.m.Unlock()
+
+	for hash, certs := range tCertPool.tCerts {
+		occupied := tCertPool.length[hash]
+		tCertPool.client.ks.storeUnusedTCerts(certs[:occupied])
+	}
+
+	tCertPool.client.Debug("Store unused TCerts...done!")
+
+	return
+}
+
+//calculateAttributesHash generates a unique hash using the passed attributes.
+// The result is order-independent: attributes are sorted before hashing.
+// Fix: the original pre-sized both slices with make(..., len) and then
+// appended, so the hashed input carried a prefix of len(attributes) empty
+// strings and 2*len(attributes) zero bytes. The value was still
+// deterministic, but the garbage prefix was accidental. NOTE: hashes computed
+// by this version differ from those persisted by the old code, so previously
+// cached TCerts will simply be ignored and re-requested from the TCA.
+func calculateAttributesHash(attributes []string) (attrHash string) {
+
+	keys := make([]string, 0, len(attributes))
+	keys = append(keys, attributes...)
+
+	sort.Strings(keys)
+
+	values := make([]byte, 0)
+
+	for _, k := range keys {
+		values = append(values, []byte(k)...)
+	}
+
+	attributesHash := primitives.Hash(values)
+	return hex.EncodeToString(attributesHash)
+
+}
+
+//GetNextTCerts returns nCerts TCerts valid for the passed attributes. If the pool runs dry the TCA is invoked to replenish it.
+func (tCertPool *tCertPoolSingleThreadImpl) GetNextTCerts(nCerts int, attributes ...string) ([]*TCertBlock, error) {
+	result := make([]*TCertBlock, 0, nCerts)
+	for len(result) < nCerts {
+		block, err := tCertPool.getNextTCert(attributes...)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, block)
+	}
+	return result, nil
+}
+
+// getNextTCert pops one TCert for the given attributes, reloading a batch
+// from the TCA when the per-hash stack is empty.
+// Fix: guard against an index-out-of-range panic when the reload reported
+// success but added no TCert for this attributes hash (the original indexed
+// tCerts[hash][-1] in that case).
+func (tCertPool *tCertPoolSingleThreadImpl) getNextTCert(attributes ...string) (tCert *TCertBlock, err error) {
+
+	tCertPool.m.Lock()
+	defer tCertPool.m.Unlock()
+
+	attributesHash := calculateAttributesHash(attributes)
+
+	if tCertPool.length[attributesHash] <= 0 {
+		// Reload a fresh batch from the TCA; getTCertsFromTCA is expected to
+		// call AddTCert for each obtained cert (m is already held).
+		if err := tCertPool.client.getTCertsFromTCA(attributesHash, attributes, tCertPool.client.conf.getTCertBatchSize()); err != nil {
+			return nil, fmt.Errorf("Failed loading TCerts from TCA")
+		}
+	}
+
+	poolLen := tCertPool.length[attributesHash]
+	if poolLen <= 0 {
+		return nil, fmt.Errorf("Failed loading TCerts from TCA")
+	}
+
+	tCert = tCertPool.tCerts[attributesHash][poolLen-1]
+
+	tCertPool.length[attributesHash] = poolLen - 1
+
+	return tCert, nil
+}
+
+//AddTCert adds a TCert into the pool; invoked by the client after TCA is called.
+// Must be called with tCertPool.m already held (see Start/getNextTCert).
+// Fix: the backing slice was allocated once with a fixed batch-size length,
+// so inserting more TCerts than the batch size panicked with an
+// index-out-of-range; the slice now grows on demand.
+func (tCertPool *tCertPoolSingleThreadImpl) AddTCert(tCertBlock *TCertBlock) (err error) {
+
+	tCertPool.client.Debugf("Adding new Cert [% x].", tCertBlock.tCert.GetCertificate().Raw)
+
+	hash := tCertBlock.attributesHash
+
+	if tCertPool.length[hash] <= 0 {
+		tCertPool.length[hash] = 0
+	}
+
+	tCertPool.length[hash] = tCertPool.length[hash] + 1
+
+	if tCertPool.tCerts[hash] == nil {
+		tCertPool.tCerts[hash] = make([]*TCertBlock, tCertPool.client.conf.getTCertBatchSize())
+	}
+
+	// Grow the backing slice when the pool outgrows the initial allocation.
+	if tCertPool.length[hash] > len(tCertPool.tCerts[hash]) {
+		tCertPool.tCerts[hash] = append(tCertPool.tCerts[hash], nil)
+	}
+
+	tCertPool.tCerts[hash][tCertPool.length[hash]-1] = tCertBlock
+
+	return nil
+}
+
+// init wires the pool to its owning client and allocates the internal maps.
+func (tCertPool *tCertPoolSingleThreadImpl) init(client *clientImpl) error {
+	tCertPool.client = client
+	client.Debug("Init TCert Pool...")
+
+	tCertPool.tCerts = map[string][]*TCertBlock{}
+	tCertPool.length = map[string]int{}
+
+	return nil
+}
diff --git a/core/crypto/client_tx.go b/core/crypto/client_tx.go
new file mode 100644
index 00000000000..4c7254fdb89
--- /dev/null
+++ b/core/crypto/client_tx.go
@@ -0,0 +1,472 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+ obc "github.com/hyperledger/fabric/protos"
+)
+
+// createTransactionNonce draws a fresh random nonce for a transaction.
+func (client *clientImpl) createTransactionNonce() ([]byte, error) {
+	nonce, err := primitives.GetRandomNonce()
+	if err != nil {
+		client.Errorf("Failed creating nonce [%s].", err.Error())
+		return nil, err
+	}
+	return nonce, nil
+}
+
+// createDeployTx assembles a chaincode deploy transaction.
+// nonce: when nil a fresh random nonce is generated; otherwise used as-is
+// (well-formedness is not checked — see TODO below).
+// tCert/attrs: forwarded to getMetadata only; tCert may be nil (ECert flow).
+// When the spec requests CONFIDENTIAL, the transaction is encrypted in place.
+func (client *clientImpl) createDeployTx(chaincodeDeploymentSpec *obc.ChaincodeDeploymentSpec, uuid string, nonce []byte, tCert tCert, attrs ...string) (*obc.Transaction, error) {
+	// Create a new transaction
+	tx, err := obc.NewChaincodeDeployTransaction(chaincodeDeploymentSpec, uuid)
+	if err != nil {
+		client.Errorf("Failed creating new transaction [%s].", err.Error())
+		return nil, err
+	}
+
+	// Copy metadata from ChaincodeSpec
+	tx.Metadata, err = getMetadata(chaincodeDeploymentSpec.GetChaincodeSpec(), tCert, attrs...)
+	if err != nil {
+		client.Errorf("Failed creating new transaction [%s].", err.Error())
+		return nil, err
+	}
+
+	if nonce == nil {
+		tx.Nonce, err = primitives.GetRandomNonce()
+		if err != nil {
+			client.Errorf("Failed creating nonce [%s].", err.Error())
+			return nil, err
+		}
+	} else {
+		// TODO: check that it is a well formed nonce
+		tx.Nonce = nonce
+	}
+
+	// Handle confidentiality
+	if chaincodeDeploymentSpec.ChaincodeSpec.ConfidentialityLevel == obc.ConfidentialityLevel_CONFIDENTIAL {
+		// 1. set confidentiality level and nonce
+		tx.ConfidentialityLevel = obc.ConfidentialityLevel_CONFIDENTIAL
+
+		// 2. set confidentiality protocol version
+		tx.ConfidentialityProtocolVersion = client.conf.GetConfidentialityProtocolVersion()
+
+		// 3. encrypt tx
+		err = client.encryptTx(tx)
+		if err != nil {
+			client.Errorf("Failed encrypting payload [%s].", err.Error())
+			return nil, err
+
+		}
+	}
+
+	return tx, nil
+}
+
+// getMetadata returns the metadata to embed in a transaction.
+// Attribute encryption is temporarily disabled, so the ChaincodeSpec's
+// metadata is currently passed through unchanged and tCert/attrs are unused.
+func getMetadata(chaincodeSpec *obc.ChaincodeSpec, tCert tCert, attrs ...string) ([]byte, error) {
+	//TODO this code is being commented due temporarily is not enabled attributes encryption.
+	/*
+		isAttributesEnabled := viper.GetBool("security.attributes.enabled")
+		if !isAttributesEnabled {
+			return chaincodeSpec.Metadata, nil
+		}
+
+		if tCert == nil {
+			return nil, errors.New("Invalid TCert.")
+		}
+
+		return attributes.CreateAttributesMetadata(tCert.GetCertificate().Raw, chaincodeSpec.Metadata, tCert.GetPreK0(), attrs)
+	*/
+	return chaincodeSpec.Metadata, nil
+}
+
+// createExecuteTx assembles a chaincode invoke transaction
+// (Transaction_CHAINCODE_INVOKE). Nonce and confidentiality are handled
+// exactly as in createDeployTx.
+func (client *clientImpl) createExecuteTx(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, nonce []byte, tCert tCert, attrs ...string) (*obc.Transaction, error) {
+	// Create a new transaction
+	tx, err := obc.NewChaincodeExecute(chaincodeInvocation, uuid, obc.Transaction_CHAINCODE_INVOKE)
+	if err != nil {
+		client.Errorf("Failed creating new transaction [%s].", err.Error())
+		return nil, err
+	}
+
+	// Copy metadata from ChaincodeSpec
+	tx.Metadata, err = getMetadata(chaincodeInvocation.GetChaincodeSpec(), tCert, attrs...)
+	if err != nil {
+		client.Errorf("Failed creating new transaction [%s].", err.Error())
+		return nil, err
+	}
+	if nonce == nil {
+		tx.Nonce, err = primitives.GetRandomNonce()
+		if err != nil {
+			client.Errorf("Failed creating nonce [%s].", err.Error())
+			return nil, err
+		}
+	} else {
+		// TODO: check that it is a well formed nonce
+		tx.Nonce = nonce
+	}
+
+	// Handle confidentiality
+	if chaincodeInvocation.ChaincodeSpec.ConfidentialityLevel == obc.ConfidentialityLevel_CONFIDENTIAL {
+		// 1. set confidentiality level and nonce
+		tx.ConfidentialityLevel = obc.ConfidentialityLevel_CONFIDENTIAL
+
+		// 2. set confidentiality protocol version
+		tx.ConfidentialityProtocolVersion = client.conf.GetConfidentialityProtocolVersion()
+
+		// 3. encrypt tx
+		err = client.encryptTx(tx)
+		if err != nil {
+			client.Errorf("Failed encrypting payload [%s].", err.Error())
+			return nil, err
+
+		}
+	}
+
+	return tx, nil
+}
+
+// createQueryTx assembles a chaincode query transaction
+// (Transaction_CHAINCODE_QUERY). Nonce and confidentiality are handled
+// exactly as in createDeployTx.
+func (client *clientImpl) createQueryTx(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, nonce []byte, tCert tCert, attrs ...string) (*obc.Transaction, error) {
+	// Create a new transaction
+	tx, err := obc.NewChaincodeExecute(chaincodeInvocation, uuid, obc.Transaction_CHAINCODE_QUERY)
+	if err != nil {
+		client.Errorf("Failed creating new transaction [%s].", err.Error())
+		return nil, err
+	}
+
+	// Copy metadata from ChaincodeSpec
+	tx.Metadata, err = getMetadata(chaincodeInvocation.GetChaincodeSpec(), tCert, attrs...)
+	if err != nil {
+		client.Errorf("Failed creating new transaction [%s].", err.Error())
+		return nil, err
+	}
+	if nonce == nil {
+		tx.Nonce, err = primitives.GetRandomNonce()
+		if err != nil {
+			client.Errorf("Failed creating nonce [%s].", err.Error())
+			return nil, err
+		}
+	} else {
+		// TODO: check that it is a well formed nonce
+		tx.Nonce = nonce
+	}
+
+	// Handle confidentiality
+	if chaincodeInvocation.ChaincodeSpec.ConfidentialityLevel == obc.ConfidentialityLevel_CONFIDENTIAL {
+		// 1. set confidentiality level and nonce
+		tx.ConfidentialityLevel = obc.ConfidentialityLevel_CONFIDENTIAL
+
+		// 2. set confidentiality protocol version
+		tx.ConfidentialityProtocolVersion = client.conf.GetConfidentialityProtocolVersion()
+
+		// 3. encrypt tx
+		err = client.encryptTx(tx)
+		if err != nil {
+			client.Errorf("Failed encrypting payload [%s].", err.Error())
+			return nil, err
+
+		}
+	}
+
+	return tx, nil
+}
+
+// newChaincodeDeployUsingTCert builds a deploy transaction, attaches the raw
+// TCert certificate as tx.Cert, and signs the marshalled transaction with
+// the TCert's key.
+func (client *clientImpl) newChaincodeDeployUsingTCert(chaincodeDeploymentSpec *obc.ChaincodeDeploymentSpec, uuid string, attributeNames []string, tCert tCert, nonce []byte) (*obc.Transaction, error) {
+	// Create a new transaction
+	tx, err := client.createDeployTx(chaincodeDeploymentSpec, uuid, nonce, tCert, attributeNames...)
+	if err != nil {
+		client.Errorf("Failed creating new deploy transaction [%s].", err.Error())
+		return nil, err
+	}
+
+	// Sign the transaction
+
+	// Append the certificate to the transaction
+	client.Debugf("Appending certificate [% x].", tCert.GetCertificate().Raw)
+	tx.Cert = tCert.GetCertificate().Raw
+
+	// Sign the transaction and append the signature
+	// 1. Marshall tx to bytes
+	rawTx, err := proto.Marshal(tx)
+	if err != nil {
+		client.Errorf("Failed marshaling tx [%s].", err.Error())
+		return nil, err
+	}
+
+	// 2. Sign rawTx and check signature
+	rawSignature, err := tCert.Sign(rawTx)
+	if err != nil {
+		client.Errorf("Failed creating signature [% x]: [%s].", rawTx, err.Error())
+		return nil, err
+	}
+
+	// 3. Append the signature
+	tx.Signature = rawSignature
+
+	client.Debugf("Appending signature: [% x]", rawSignature)
+
+	return tx, nil
+}
+
+// newChaincodeExecuteUsingTCert builds an invoke transaction, attaches the
+// raw TCert certificate as tx.Cert, and signs the marshalled transaction
+// with the TCert's key.
+// Fix: on Sign failure the error log printed rawSignature (nil at that
+// point); it now prints rawTx, matching newChaincodeDeployUsingTCert.
+func (client *clientImpl) newChaincodeExecuteUsingTCert(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, attributeKeys []string, tCert tCert, nonce []byte) (*obc.Transaction, error) {
+	// Create a new transaction
+	tx, err := client.createExecuteTx(chaincodeInvocation, uuid, nonce, tCert, attributeKeys...)
+	if err != nil {
+		client.Errorf("Failed creating new execute transaction [%s].", err.Error())
+		return nil, err
+	}
+
+	// Sign the transaction
+
+	// Append the certificate to the transaction
+	client.Debugf("Appending certificate [% x].", tCert.GetCertificate().Raw)
+	tx.Cert = tCert.GetCertificate().Raw
+
+	// Sign the transaction and append the signature
+	// 1. Marshall tx to bytes
+	rawTx, err := proto.Marshal(tx)
+	if err != nil {
+		client.Errorf("Failed marshaling tx [%s].", err.Error())
+		return nil, err
+	}
+
+	// 2. Sign rawTx and check signature
+	rawSignature, err := tCert.Sign(rawTx)
+	if err != nil {
+		client.Errorf("Failed creating signature [% x]: [%s].", rawTx, err.Error())
+		return nil, err
+	}
+
+	// 3. Append the signature
+	tx.Signature = rawSignature
+
+	client.Debugf("Appending signature [% x].", rawSignature)
+
+	return tx, nil
+}
+
+// newChaincodeQueryUsingTCert builds a query transaction, attaches the raw
+// TCert certificate as tx.Cert, and signs the marshalled transaction with
+// the TCert's key.
+// Fix: on Sign failure the error log printed rawSignature (nil at that
+// point); it now prints rawTx, matching newChaincodeDeployUsingTCert.
+func (client *clientImpl) newChaincodeQueryUsingTCert(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, attributeNames []string, tCert tCert, nonce []byte) (*obc.Transaction, error) {
+	// Create a new transaction
+	tx, err := client.createQueryTx(chaincodeInvocation, uuid, nonce, tCert, attributeNames...)
+	if err != nil {
+		client.Errorf("Failed creating new query transaction [%s].", err.Error())
+		return nil, err
+	}
+
+	// Sign the transaction
+
+	// Append the certificate to the transaction
+	client.Debugf("Appending certificate [% x].", tCert.GetCertificate().Raw)
+	tx.Cert = tCert.GetCertificate().Raw
+
+	// Sign the transaction and append the signature
+	// 1. Marshall tx to bytes
+	rawTx, err := proto.Marshal(tx)
+	if err != nil {
+		client.Errorf("Failed marshaling tx [%s].", err.Error())
+		return nil, err
+	}
+
+	// 2. Sign rawTx and check signature
+	rawSignature, err := tCert.Sign(rawTx)
+	if err != nil {
+		client.Errorf("Failed creating signature [% x]: [%s].", rawTx, err.Error())
+		return nil, err
+	}
+
+	// 3. Append the signature
+	tx.Signature = rawSignature
+
+	client.Debugf("Appending signature [% x].", rawSignature)
+
+	return tx, nil
+}
+
+// newChaincodeDeployUsingECert builds a deploy transaction, attaches the
+// client's enrollment certificate as tx.Cert, and signs the marshalled
+// transaction with the enrollment key. tCert is passed as nil to
+// createDeployTx (no attributes in the ECert flow).
+func (client *clientImpl) newChaincodeDeployUsingECert(chaincodeDeploymentSpec *obc.ChaincodeDeploymentSpec, uuid string, nonce []byte) (*obc.Transaction, error) {
+	// Create a new transaction
+	tx, err := client.createDeployTx(chaincodeDeploymentSpec, uuid, nonce, nil)
+	if err != nil {
+		client.Errorf("Failed creating new deploy transaction [%s].", err.Error())
+		return nil, err
+	}
+
+	// Sign the transaction
+
+	// Append the certificate to the transaction
+	client.Debugf("Appending certificate [% x].", client.enrollCert.Raw)
+	tx.Cert = client.enrollCert.Raw
+
+	// Sign the transaction and append the signature
+	// 1. Marshall tx to bytes
+	rawTx, err := proto.Marshal(tx)
+	if err != nil {
+		client.Errorf("Failed marshaling tx [%s].", err.Error())
+		return nil, err
+	}
+
+	// 2. Sign rawTx and check signature
+	rawSignature, err := client.signWithEnrollmentKey(rawTx)
+	if err != nil {
+		client.Errorf("Failed creating signature [% x]: [%s].", rawTx, err.Error())
+		return nil, err
+	}
+
+	// 3. Append the signature
+	tx.Signature = rawSignature
+
+	client.Debugf("Appending signature: [% x]", rawSignature)
+
+	return tx, nil
+}
+
+// newChaincodeExecuteUsingECert builds an invoke transaction, attaches the
+// client's enrollment certificate as tx.Cert, and signs the marshalled
+// transaction with the enrollment key.
+func (client *clientImpl) newChaincodeExecuteUsingECert(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, nonce []byte) (*obc.Transaction, error) {
+	// Create a new transaction
+	tx, err := client.createExecuteTx(chaincodeInvocation, uuid, nonce, nil)
+	if err != nil {
+		client.Errorf("Failed creating new execute transaction [%s].", err.Error())
+		return nil, err
+	}
+
+	// Sign the transaction
+
+	// Append the certificate to the transaction
+	client.Debugf("Appending certificate [% x].", client.enrollCert.Raw)
+	tx.Cert = client.enrollCert.Raw
+
+	// Sign the transaction and append the signature
+	// 1. Marshall tx to bytes
+	rawTx, err := proto.Marshal(tx)
+	if err != nil {
+		client.Errorf("Failed marshaling tx [%s].", err.Error())
+		return nil, err
+	}
+
+	// 2. Sign rawTx and check signature
+	rawSignature, err := client.signWithEnrollmentKey(rawTx)
+	if err != nil {
+		client.Errorf("Failed creating signature [% x]: [%s].", rawTx, err.Error())
+		return nil, err
+	}
+
+	// 3. Append the signature
+	tx.Signature = rawSignature
+
+	client.Debugf("Appending signature [% x].", rawSignature)
+
+	return tx, nil
+}
+
+// newChaincodeQueryUsingECert builds a query transaction, attaches the
+// client's enrollment certificate as tx.Cert, and signs the marshalled
+// transaction with the enrollment key.
+func (client *clientImpl) newChaincodeQueryUsingECert(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, nonce []byte) (*obc.Transaction, error) {
+	// Create a new transaction
+	tx, err := client.createQueryTx(chaincodeInvocation, uuid, nonce, nil)
+	if err != nil {
+		client.Errorf("Failed creating new query transaction [%s].", err.Error())
+		return nil, err
+	}
+
+	// Sign the transaction
+
+	// Append the certificate to the transaction
+	client.Debugf("Appending certificate [% x].", client.enrollCert.Raw)
+	tx.Cert = client.enrollCert.Raw
+
+	// Sign the transaction and append the signature
+	// 1. Marshall tx to bytes
+	rawTx, err := proto.Marshal(tx)
+	if err != nil {
+		client.Errorf("Failed marshaling tx [%s].", err.Error())
+		return nil, err
+	}
+
+	// 2. Sign rawTx and check signature
+	rawSignature, err := client.signWithEnrollmentKey(rawTx)
+	if err != nil {
+		client.Errorf("Failed creating signature [% x]: [%s].", rawTx, err.Error())
+		return nil, err
+	}
+
+	// 3. Append the signature
+	tx.Signature = rawSignature
+
+	client.Debugf("Appending signature [% x].", rawSignature)
+
+	return tx, nil
+}
+
+// CheckTransaction is used to verify that a transaction
+// is well formed with the respect to the security layer
+// prescriptions. To be used for internal verifications.
+//
+// Fixes: the signature-verification failure path logged the misleading
+// message "Failed marshaling tx"; typos "verifing" corrected.
+func (client *clientImpl) checkTransaction(tx *obc.Transaction) error {
+	if !client.isInitialized {
+		return utils.ErrNotInitialized
+	}
+
+	if tx.Cert == nil && tx.Signature == nil {
+		return utils.ErrTransactionMissingCert
+	}
+
+	if tx.Cert != nil && tx.Signature != nil {
+		// Verify the transaction
+		// 1. Unmarshal cert
+		cert, err := primitives.DERToX509Certificate(tx.Cert)
+		if err != nil {
+			client.Errorf("Failed unmarshalling cert [%s].", err.Error())
+			return err
+		}
+
+		// a. Get rid of the extensions that cannot be checked now
+		cert.UnhandledCriticalExtensions = nil
+		// b. Check against TCA certPool
+		if _, err = primitives.CheckCertAgainRoot(cert, client.tcaCertPool); err != nil {
+			client.Warningf("Failed verifying certificate against TCA cert pool [%s].", err.Error())
+			// c. Check against ECA certPool, if this check also fails then return an error
+			if _, err = primitives.CheckCertAgainRoot(cert, client.ecaCertPool); err != nil {
+				client.Warningf("Failed verifying certificate against ECA cert pool [%s].", err.Error())
+
+				return fmt.Errorf("Certificate has not been signed by a trusted authority. [%s]", err)
+			}
+		}
+
+		// 2. Marshall tx without signature (restored afterwards)
+		signature := tx.Signature
+		tx.Signature = nil
+		rawTx, err := proto.Marshal(tx)
+		if err != nil {
+			client.Errorf("Failed marshaling tx [%s].", err.Error())
+			return err
+		}
+		tx.Signature = signature
+
+		// 3. Verify signature
+		ver, err := client.verify(cert.PublicKey, rawTx, tx.Signature)
+		if err != nil {
+			client.Errorf("Failed verifying tx signature [%s].", err.Error())
+			return err
+		}
+
+		if ver {
+			return nil
+		}
+
+		return utils.ErrInvalidTransactionSignature
+	}
+
+	return utils.ErrTransactionMissingCert
+}
diff --git a/core/crypto/crypto.go b/core/crypto/crypto.go
new file mode 100644
index 00000000000..409bdf3b811
--- /dev/null
+++ b/core/crypto/crypto.go
@@ -0,0 +1,160 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ obc "github.com/hyperledger/fabric/protos"
+)
+
+// Public Interfaces
+
+// NodeType represents the node's type
+type NodeType int32
+
+const (
+ // NodeClient a client
+ NodeClient NodeType = 0
+ // NodePeer a peer
+ NodePeer NodeType = 1
+ // NodeValidator a validator
+ NodeValidator NodeType = 2
+)
+
+// Node represents a crypto object having a name
+type Node interface {
+
+ // GetType returns this entity's name
+ GetType() NodeType
+
+ // GetName returns this entity's name
+ GetName() string
+}
+
+// Client is an entity able to deploy and invoke chaincode
+type Client interface {
+ Node
+
+ // NewChaincodeDeployTransaction is used to deploy chaincode.
+ NewChaincodeDeployTransaction(chaincodeDeploymentSpec *obc.ChaincodeDeploymentSpec, uuid string, attributes ...string) (*obc.Transaction, error)
+
+ // NewChaincodeExecute is used to execute chaincode's functions.
+ NewChaincodeExecute(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, attributes ...string) (*obc.Transaction, error)
+
+ // NewChaincodeQuery is used to query chaincode's functions.
+ NewChaincodeQuery(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, attributes ...string) (*obc.Transaction, error)
+
+ // DecryptQueryResult is used to decrypt the result of a query transaction
+ DecryptQueryResult(queryTx *obc.Transaction, result []byte) ([]byte, error)
+
+ // GetEnrollmentCertificateHandler returns a CertificateHandler whose certificate is the enrollment certificate
+ GetEnrollmentCertificateHandler() (CertificateHandler, error)
+
+ // GetTCertificateHandlerNext returns a CertificateHandler whose certificate is the next available TCert
+ GetTCertificateHandlerNext(attributes ...string) (CertificateHandler, error)
+
+ // GetTCertificateHandlerFromDER returns a CertificateHandler whose certificate is the one passed
+ GetTCertificateHandlerFromDER(tCertDER []byte) (CertificateHandler, error)
+
+ // GetNextTCerts returns a slice of a requested number of (not yet used) transaction certificates
+ GetNextTCerts(nCerts int, attributes ...string) ([]tCert, error)
+}
+
+// Peer is an entity able to verify transactions
+type Peer interface {
+ Node
+
+ // GetID returns this peer's identifier
+ GetID() []byte
+
+ // GetEnrollmentID returns this peer's enrollment id
+ GetEnrollmentID() string
+
+ // TransactionPreValidation verifies that the transaction is
+ // well formed with respect to the security layer
+ // prescriptions (i.e. signature verification).
+ TransactionPreValidation(tx *obc.Transaction) (*obc.Transaction, error)
+
+ // TransactionPreExecution verifies that the transaction is
+ // well formed with respect to the security layer
+ // prescriptions (i.e. signature verification). If this is the case,
+ // the method prepares the transaction to be executed.
+ // TransactionPreExecution returns a clone of tx.
+ TransactionPreExecution(tx *obc.Transaction) (*obc.Transaction, error)
+
+ // Sign signs msg with this validator's signing key and outputs
+ // the signature if no error occurred.
+ Sign(msg []byte) ([]byte, error)
+
+ // Verify checks that signature is a valid signature of message under vkID's verification key.
+ // If the verification succeeded, Verify returns nil meaning no error occurred.
+ // If vkID is nil, then the signature is verified against this validator's verification key.
+ Verify(vkID, signature, message []byte) error
+
+ // GetStateEncryptor returns a StateEncryptor linked to pair defined by
+ // the deploy transaction and the execute transaction. Notice that,
+ // executeTx can also correspond to a deploy transaction.
+ GetStateEncryptor(deployTx, executeTx *obc.Transaction) (StateEncryptor, error)
+
+ GetTransactionBinding(tx *obc.Transaction) ([]byte, error)
+}
+
+// StateEncryptor is used to encrypt chaincode's state
+type StateEncryptor interface {
+
+ // Encrypt encrypts message msg
+ Encrypt(msg []byte) ([]byte, error)
+
+ // Decrypt decrypts ciphertext ct obtained
+ // from a call of the Encrypt method.
+ Decrypt(ct []byte) ([]byte, error)
+}
+
+// CertificateHandler exposes methods to deal with an ECert/TCert
+type CertificateHandler interface {
+
+ // GetCertificate returns the certificate's DER
+ GetCertificate() []byte
+
+ // Sign signs msg using the signing key corresponding to the certificate
+ Sign(msg []byte) ([]byte, error)
+
+ // Verify verifies msg using the verifying key corresponding to the certificate
+ Verify(signature []byte, msg []byte) error
+
+ // GetTransactionHandler returns a new transaction handler relative to this certificate
+ GetTransactionHandler() (TransactionHandler, error)
+}
+
+// TransactionHandler represents a single transaction that can be named by the output of the GetBinding method.
+// This transaction is linked to a single Certificate (TCert or ECert).
+type TransactionHandler interface {
+
+ // GetCertificateHandler returns the certificate handler relative to the certificate mapped to this transaction
+ GetCertificateHandler() (CertificateHandler, error)
+
+ // GetBinding returns a binding to the underlying transaction
+ GetBinding() ([]byte, error)
+
+ // NewChaincodeDeployTransaction is used to deploy chaincode
+ NewChaincodeDeployTransaction(chaincodeDeploymentSpec *obc.ChaincodeDeploymentSpec, uuid string, attributeNames ...string) (*obc.Transaction, error)
+
+ // NewChaincodeExecute is used to execute chaincode's functions
+ NewChaincodeExecute(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, attributeNames ...string) (*obc.Transaction, error)
+
+ // NewChaincodeQuery is used to query chaincode's functions
+ NewChaincodeQuery(chaincodeInvocation *obc.ChaincodeInvocationSpec, uuid string, attributeNames ...string) (*obc.Transaction, error)
+}
diff --git a/core/crypto/crypto_profile.sh b/core/crypto/crypto_profile.sh
new file mode 100755
index 00000000000..8061805aa0f
--- /dev/null
+++ b/core/crypto/crypto_profile.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+if [ -f "crypto.test" ]
+then
+ echo Removing crypto.test
+ rm crypto.test
+fi
+
+echo Compile, banchmark, pprof...
+
+go test -c
+#./crypto.test -test.cpuprofile=crypto.prof
+#./crypto.test -test.bench BenchmarkConfidentialTCertHExecuteTransaction -test.run XXX -test.cpuprofile=crypto.prof
+./crypto.test -test.bench BenchmarkTransaction -test.run XXX -test.cpuprofile=crypto.prof
+#./crypto.test -test.run TestParallelInitClose -test.cpuprofile=crypto.prof
+go tool pprof crypto.test crypto.prof
\ No newline at end of file
diff --git a/core/crypto/crypto_protocol.go b/core/crypto/crypto_protocol.go
new file mode 100644
index 00000000000..d3654c3951c
--- /dev/null
+++ b/core/crypto/crypto_protocol.go
@@ -0,0 +1,39 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ obc "github.com/hyperledger/fabric/protos"
+)
+
+type nodeProtocol interface {
+ init(node Node)
+
+ parseTransaction(tx *obc.Transaction) (*obc.Transaction, error)
+}
+
+type clientProtocol interface {
+ nodeProtocol
+
+ decryptQueryResult(queryTx *obc.Transaction, result []byte) ([]byte, error)
+}
+
+type validatorProtocol interface {
+ nodeProtocol
+
+ getStateEncryptor(deployTx, executeTx *obc.Transaction) (StateEncryptor, error)
+}
diff --git a/core/crypto/crypto_settings.go b/core/crypto/crypto_settings.go
new file mode 100644
index 00000000000..aec68621262
--- /dev/null
+++ b/core/crypto/crypto_settings.go
@@ -0,0 +1,57 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+var (
+ log = logging.MustGetLogger("crypto")
+)
+
+// Init initializes the crypto layer. It loads from viper the security level
+// and the logging setting.
+func Init() (err error) {
+ // Init security level
+ securityLevel := 256
+ if viper.IsSet("security.level") {
+ ovveride := viper.GetInt("security.level")
+ if ovveride != 0 {
+ securityLevel = ovveride
+ }
+ }
+
+ hashAlgorithm := "SHA3"
+ if viper.IsSet("security.hashAlgorithm") {
+ ovveride := viper.GetString("security.hashAlgorithm")
+ if ovveride != "" {
+ hashAlgorithm = ovveride
+ }
+ }
+
+ log.Debugf("Working at security level [%d]", securityLevel)
+ if err = primitives.InitSecurityLevel(hashAlgorithm, securityLevel); err != nil {
+ log.Errorf("Failed setting security level: [%s]", err)
+
+ return
+ }
+
+ return
+}
diff --git a/core/crypto/crypto_settings_test.go b/core/crypto/crypto_settings_test.go
new file mode 100755
index 00000000000..5789bfcc4c6
--- /dev/null
+++ b/core/crypto/crypto_settings_test.go
@@ -0,0 +1,49 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "testing"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+func TestCryptoInitInheritsLoggingLevel(t *testing.T) {
+ logging.SetLevel(logging.WARNING, "crypto")
+
+ Init()
+
+ assertCryptoLoggingLevel(t, logging.WARNING)
+}
+
+func TestCryptoInitDoesntOverrideLoggingLevel(t *testing.T) {
+ logging.SetLevel(logging.WARNING, "crypto")
+ viper.Set("logging.crypto", "info")
+
+ Init()
+
+ assertCryptoLoggingLevel(t, logging.WARNING)
+}
+
+func assertCryptoLoggingLevel(t *testing.T, expected logging.Level) {
+ actual := logging.GetLevel("crypto")
+
+ if expected != actual {
+ t.Errorf("Expected %v, got %v", expected, actual)
+ }
+}
diff --git a/core/crypto/crypto_test.go b/core/crypto/crypto_test.go
new file mode 100644
index 00000000000..ea3853a7ca4
--- /dev/null
+++ b/core/crypto/crypto_test.go
@@ -0,0 +1,2154 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ obc "github.com/hyperledger/fabric/protos"
+ "github.com/op/go-logging"
+
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path/filepath"
+ "reflect"
+ "testing"
+
+ "crypto/rand"
+
+ "runtime"
+ "time"
+
+ "github.com/hyperledger/fabric/core/crypto/attributes"
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+ "github.com/hyperledger/fabric/core/util"
+ "github.com/hyperledger/fabric/membersrvc/ca"
+ "github.com/spf13/viper"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+)
+
+type createTxFunc func(t *testing.T) (*obc.Transaction, *obc.Transaction, error)
+
+var (
+ validator Peer
+
+ peer Peer
+
+ deployer Client
+ invoker Client
+
+ server *grpc.Server
+ aca *ca.ACA
+ eca *ca.ECA
+ tca *ca.TCA
+ tlsca *ca.TLSCA
+
+ deployTxCreators []createTxFunc
+ executeTxCreators []createTxFunc
+ queryTxCreators []createTxFunc
+
+ ksPwd = []byte("This is a very very very long pw")
+
+ attrs = []string{"company", "position"}
+)
+
+func TestMain(m *testing.M) {
+ // Setup the test
+ setup()
+
+ //Define a map to store the scenarios properties
+ properties := make(map[string]interface{})
+ ret := 0
+
+ //First scenario with crypto_test.yaml
+ ret = runTestsOnScenario(m, properties, "Using crypto_test.yaml properties")
+ if ret != 0 {
+ os.Exit(ret)
+ }
+
+ //Fourth scenario with security level = 384
+ properties["security.hashAlgorithm"] = "SHA3"
+ properties["security.level"] = "384"
+ ret = runTestsOnScenario(m, properties, "Using SHA3-384")
+ if ret != 0 {
+ os.Exit(ret)
+ }
+
+ //Fifth scenario with SHA2
+ properties["security.hashAlgorithm"] = "SHA2"
+ properties["security.level"] = "256"
+ ret = runTestsOnScenario(m, properties, "Using SHA2-256")
+ if ret != 0 {
+ os.Exit(ret)
+ }
+
+ //Sixth scenario with SHA2
+ properties["security.hashAlgorithm"] = "SHA2"
+ properties["security.level"] = "384"
+ ret = runTestsOnScenario(m, properties, "Using SHA2-384")
+ if ret != 0 {
+ os.Exit(ret)
+ }
+
+ os.Exit(ret)
+}
+
+//loadConfigScenario loads the properties in viper and returns the current values.
+func loadConfigScenario(properties map[string]interface{}) map[string]interface{} {
+ currentValues := make(map[string]interface{})
+ for k, v := range properties {
+ currentValues[k] = viper.Get(k)
+ viper.Set(k, v)
+ }
+ return currentValues
+}
+
+func before() {
+ // Init PKI
+ initPKI()
+ go startPKI()
+}
+
+func after() {
+ cleanup()
+}
+
+func runTestsOnScenario(m *testing.M, properties map[string]interface{}, scenarioName string) int {
+ fmt.Printf("=== Start tests for scenario '%v' ===\n", scenarioName)
+ currentValues := make(map[string]interface{})
+ if len(properties) > 0 {
+ currentValues = loadConfigScenario(properties)
+ }
+ primitives.SetSecurityLevel(viper.GetString("security.hashAlgorithm"), viper.GetInt("security.level"))
+
+ before()
+ ret := m.Run()
+ after()
+
+ if len(properties) > 0 {
+ _ = loadConfigScenario(currentValues)
+ }
+ fmt.Printf("=== End tests for scenario '%v' ===\n", scenarioName)
+ return ret
+}
+
+func TestParallelInitClose(t *testing.T) {
+ clientConf := utils.NodeConfiguration{Type: "client", Name: "userthread"}
+ peerConf := utils.NodeConfiguration{Type: "peer", Name: "peerthread"}
+ validatorConf := utils.NodeConfiguration{Type: "validator", Name: "validatorthread"}
+
+ if err := RegisterClient(clientConf.Name, nil, clientConf.GetEnrollmentID(), clientConf.GetEnrollmentPWD()); err != nil {
+ t.Fatalf("Failed registerting userthread.")
+ }
+
+ if err := RegisterPeer(peerConf.Name, nil, peerConf.GetEnrollmentID(), peerConf.GetEnrollmentPWD()); err != nil {
+ t.Fatalf("Failed registerting peerthread")
+ }
+
+ if err := RegisterValidator(validatorConf.Name, nil, validatorConf.GetEnrollmentID(), validatorConf.GetEnrollmentPWD()); err != nil {
+ t.Fatalf("Failed registerting validatorthread")
+ }
+
+ done := make(chan bool)
+
+ n := 10
+ var peer Peer
+ var validator Peer
+ var client Client
+
+ var err error
+ for i := 0; i < n; i++ {
+ go func() {
+ if err := RegisterPeer(peerConf.Name, nil, peerConf.GetEnrollmentID(), peerConf.GetEnrollmentPWD()); err != nil {
+ t.Logf("Failed registerting peerthread")
+ }
+ peer, err = InitPeer(peerConf.Name, nil)
+ if err != nil {
+ t.Logf("Failed peer initialization [%s]", err)
+ }
+
+ if err := RegisterValidator(validatorConf.Name, nil, validatorConf.GetEnrollmentID(), validatorConf.GetEnrollmentPWD()); err != nil {
+ t.Logf("Failed registerting validatorthread")
+ }
+ validator, err = InitValidator(validatorConf.Name, nil)
+ if err != nil {
+ t.Logf("Failed validator initialization [%s]", err)
+ }
+
+ if err := RegisterClient(clientConf.Name, nil, clientConf.GetEnrollmentID(), clientConf.GetEnrollmentPWD()); err != nil {
+ t.Logf("Failed registerting userthread.")
+ }
+ client, err = InitClient(clientConf.Name, nil)
+ if err != nil {
+ t.Logf("Failed client initialization [%s]", err)
+ }
+
+ for i := 0; i < 5; i++ {
+ client, err := InitClient(clientConf.Name, nil)
+ if err != nil {
+ t.Logf("Failed client initialization [%s]", err)
+ }
+
+ runtime.Gosched()
+ time.Sleep(500 * time.Millisecond)
+
+ err = CloseClient(client)
+ if err != nil {
+ t.Logf("Failed client closing [%s]", err)
+ }
+ }
+ done <- true
+ }()
+ }
+
+ for i := 0; i < n; i++ {
+ log.Info("Waiting")
+ <-done
+ log.Info("+1")
+ }
+
+ // Close Client, Peer and Validator n times
+ for i := 0; i < n; i++ {
+ if err := CloseClient(client); err != nil {
+ t.Fatalf("Client should be still closable. [%d][%d]", i, n)
+ }
+
+ if err := ClosePeer(peer); err != nil {
+ t.Fatalf("Peer should be still closable. [%d][%d]", i, n)
+ }
+
+ if err := CloseValidator(validator); err != nil {
+ t.Fatalf("Validator should be still closable. [%d][%d]", i, n)
+ }
+ }
+}
+
+func TestRegistrationSameEnrollIDDifferentRole(t *testing.T) {
+ conf := utils.NodeConfiguration{Type: "client", Name: "TestRegistrationSameEnrollIDDifferentRole"}
+ if err := RegisterClient(conf.Name, nil, conf.GetEnrollmentID(), conf.GetEnrollmentPWD()); err != nil {
+ t.Fatalf("Failed client registration [%s]", err)
+ }
+
+ if err := RegisterValidator(conf.Name, nil, conf.GetEnrollmentID(), conf.GetEnrollmentPWD()); err == nil {
+ t.Fatal("Reusing the same enrollment id must be forbidden", err)
+ }
+
+ if err := RegisterPeer(conf.Name, nil, conf.GetEnrollmentID(), conf.GetEnrollmentPWD()); err == nil {
+ t.Fatal("Reusing the same enrollment id must be forbidden", err)
+ }
+}
+
+func TestTLSCertificateDeletion(t *testing.T) {
+ conf := utils.NodeConfiguration{Type: "peer", Name: "peer"}
+
+ peer, err := registerAndReturnPeer(conf.Name, nil, conf.GetEnrollmentID(), conf.GetEnrollmentPWD())
+ if err != nil {
+ t.Fatalf("Failed peer registration [%s]", err)
+ }
+
+ if peer.ks.certMissing(peer.conf.getTLSCertFilename()) {
+ t.Fatal("TLS shouldn't be missing after peer registration")
+ }
+
+ if err := peer.deleteTLSCertificate(conf.GetEnrollmentID(), conf.GetEnrollmentPWD()); err != nil {
+ t.Fatalf("Failed deleting TLS certificate [%s]", err)
+ }
+
+ if !peer.ks.certMissing(peer.conf.getTLSCertFilename()) {
+ t.Fatal("TLS certificate should be missing after deletion")
+ }
+}
+
+func TestRegistrationAfterDeletingTLSCertificate(t *testing.T) {
+ conf := utils.NodeConfiguration{Type: "peer", Name: "peer"}
+
+ peer, err := registerAndReturnPeer(conf.Name, nil, conf.GetEnrollmentID(), conf.GetEnrollmentPWD())
+ if err != nil {
+ t.Fatalf("Failed peer registration [%s]", err)
+ }
+
+ if err := peer.deleteTLSCertificate(conf.GetEnrollmentID(), conf.GetEnrollmentPWD()); err != nil {
+ t.Fatalf("Failed deleting TLS certificate [%s]", err)
+ }
+
+ if _, err := registerAndReturnPeer(conf.Name, nil, conf.GetEnrollmentID(), conf.GetEnrollmentPWD()); err != nil {
+ t.Fatalf("Failed peer registration [%s]", err)
+ }
+}
+
+func registerAndReturnPeer(name string, pwd []byte, enrollID, enrollPWD string) (*peerImpl, error) {
+ peer := newPeer()
+ if err := peer.register(NodePeer, name, pwd, enrollID, enrollPWD, nil); err != nil {
+ return nil, err
+ }
+ if err := peer.close(); err != nil {
+ return nil, err
+ }
+ return peer, nil
+}
+
+func TestInitialization(t *testing.T) {
+ // Init fake client
+ client, err := InitClient("", nil)
+ if err == nil || client != nil {
+ t.Fatal("Init should fail")
+ }
+ err = CloseClient(client)
+ if err == nil {
+ t.Fatal("Close should fail")
+ }
+
+ // Init fake peer
+ peer, err = InitPeer("", nil)
+ if err == nil || peer != nil {
+ t.Fatal("Init should fail")
+ }
+ err = ClosePeer(peer)
+ if err == nil {
+ t.Fatal("Close should fail")
+ }
+
+ // Init fake validator
+ validator, err = InitValidator("", nil)
+ if err == nil || validator != nil {
+ t.Fatal("Init should fail")
+ }
+ err = CloseValidator(validator)
+ if err == nil {
+ t.Fatal("Close should fail")
+ }
+}
+
+func TestClientDeployTransaction(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+ for i, createTx := range deployTxCreators {
+ t.Logf("TestClientDeployTransaction with [%d]\n", i)
+
+ _, tx, err := createTx(t)
+
+ if err != nil {
+ t.Fatalf("Failed creating deploy transaction [%s].", err)
+ }
+
+ if tx == nil {
+ t.Fatalf("Result must be different from nil")
+ }
+
+ // Check transaction. For test purposes only
+ err = deployer.(*clientImpl).checkTransaction(tx)
+ if err != nil {
+ t.Fatalf("Failed checking transaction [%s].", err)
+ }
+ }
+}
+
+func TestClientExecuteTransaction(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+ for i, createTx := range executeTxCreators {
+ t.Logf("TestClientExecuteTransaction with [%d]\n", i)
+
+ _, tx, err := createTx(t)
+
+ if err != nil {
+ t.Fatalf("Failed creating deploy transaction [%s].", err)
+ }
+
+ if tx == nil {
+ t.Fatalf("Result must be different from nil")
+ }
+
+ // Check transaction. For test purposes only
+ err = invoker.(*clientImpl).checkTransaction(tx)
+ if err != nil {
+ t.Fatalf("Failed checking transaction [%s].", err)
+ }
+ }
+}
+
+func TestClientQueryTransaction(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+ for i, createTx := range queryTxCreators {
+ t.Logf("TestClientQueryTransaction with [%d]\n", i)
+
+ _, tx, err := createTx(t)
+
+ if err != nil {
+ t.Fatalf("Failed creating deploy transaction [%s].", err)
+ }
+
+ if tx == nil {
+ t.Fatalf("Result must be different from nil")
+ }
+
+ // Check transaction. For test purposes only
+ err = invoker.(*clientImpl).checkTransaction(tx)
+ if err != nil {
+ t.Fatalf("Failed checking transaction [%s].", err)
+ }
+ }
+}
+
+func TestClientMultiExecuteTransaction(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+ for i := 0; i < 24; i++ {
+ _, tx, err := createConfidentialExecuteTransaction(t)
+
+ if err != nil {
+ t.Fatalf("Failed creating execute transaction [%s].", err)
+ }
+
+ if tx == nil {
+ t.Fatalf("Result must be different from nil")
+ }
+
+ // Check transaction. For test purposes only
+ err = invoker.(*clientImpl).checkTransaction(tx)
+ if err != nil {
+ t.Fatalf("Failed checking transaction [%s].", err)
+ }
+ }
+}
+
+func TestClientGetNextTCerts(t *testing.T) {
+
+ // Some positive flow tests here
+ var nCerts int = 1
+ for i := 1; i < 3; i++ {
+ nCerts *= 10
+ fmt.Println(fmt.Sprintf("Calling GetNextTCerts(%d)", nCerts))
+ rvCerts, err := deployer.GetNextTCerts(nCerts)
+ if err != nil {
+ t.Fatalf("Could not receive %d TCerts", nCerts)
+ }
+ if len(rvCerts) != nCerts {
+ t.Fatalf("Expected exactly '%d' TCerts as a return from GetNextTCert(%d)", nCerts, nCerts)
+ }
+
+ for nPos, cert := range rvCerts {
+ if cert == nil {
+ t.Fatalf("Returned TCert (at position %d) cannot be nil", nPos)
+ }
+ }
+ }
+
+ // Some negative flow tests here
+ _, err := deployer.GetNextTCerts(0)
+ if err == nil {
+ t.Fatalf("Requesting 0 TCerts: expected an error when calling GetNextTCerts(0)")
+ }
+
+ _, err = deployer.GetNextTCerts(-1)
+ if err == nil {
+ t.Fatalf("Requesting -1 TCerts: expected an error when calling GetNextTCerts(-1)")
+ }
+
+}
+
+//TestClientGetAttributesFromTCert verifies that the value read from the TCert is the expected value "ACompany".
+func TestClientGetAttributesFromTCert(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ tCerts, err := deployer.GetNextTCerts(1, attrs...)
+
+ if err != nil {
+ t.Fatalf("Failed getting TCert by calling GetNextTCerts(1): [%s]", err)
+ }
+ if tCerts == nil {
+ t.Fatalf("TCert should be different from nil")
+ }
+ if len(tCerts) != 1 {
+ t.Fatalf("Expected one TCert returned from GetNextTCerts(1)")
+ }
+
+ tcertDER := tCerts[0].GetCertificate().Raw
+
+ if tcertDER == nil {
+ t.Fatalf("Cert should be different from nil")
+ }
+ if len(tcertDER) == 0 {
+ t.Fatalf("Cert should have length > 0")
+ }
+
+ attributeBytes, err := attributes.GetValueForAttribute("company", tCerts[0].GetPreK0(), tCerts[0].GetCertificate())
+ if err != nil {
+ t.Fatalf("Error retrieving attribute from TCert: [%s]", err)
+ }
+
+ attributeValue := string(attributeBytes[:])
+
+ if attributeValue != "ACompany" {
+ t.Fatalf("Wrong attribute retrieved from TCert. Expected [%s], Actual [%s]", "ACompany", attributeValue)
+ }
+}
+
+func TestClientGetAttributesFromTCertWithUnusedTCerts(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ _, _ = deployer.GetNextTCerts(1, attrs...)
+
+ after() //Tear down the server.
+ before() //Start up again to use unused TCerts
+
+ tcerts, err := deployer.GetNextTCerts(1, attrs...)
+
+ if err != nil {
+ t.Fatalf("Failed getting tcert: [%s]", err)
+ }
+ if tcerts == nil {
+ t.Fatalf("Returned TCerts slice should be different from nil")
+ }
+ if tcerts[0] == nil {
+ t.Fatalf("Returned TCerts slice's first entry should be different from nil")
+ }
+
+ tcertDER := tcerts[0].GetCertificate().Raw
+
+ if tcertDER == nil {
+ t.Fatalf("Cert should be different from nil")
+ }
+ if len(tcertDER) == 0 {
+ t.Fatalf("Cert should have length > 0")
+ }
+
+ attributeBytes, err := attributes.GetValueForAttribute("company", tcerts[0].GetPreK0(), tcerts[0].GetCertificate())
+ if err != nil {
+ t.Fatalf("Error retrieving attribute from TCert: [%s]", err)
+ }
+
+ attributeValue := string(attributeBytes[:])
+
+ if attributeValue != "ACompany" {
+ t.Fatalf("Wrong attribute retrieved from TCert. Expected [%s], Actual [%s]", "ACompany", attributeValue)
+ }
+}
+
+func TestClientGetTCertHandlerNext(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ handler, err := deployer.GetTCertificateHandlerNext(attrs...)
+
+ if err != nil {
+ t.Fatalf("Failed getting handler: [%s]", err)
+ }
+ if handler == nil {
+ t.Fatalf("Handler should be different from nil")
+ }
+
+ certDER := handler.GetCertificate()
+
+ if certDER == nil {
+ t.Fatalf("Cert should be different from nil")
+ }
+ if len(certDER) == 0 {
+ t.Fatalf("Cert should have length > 0")
+ }
+}
+
+func TestClientGetTCertHandlerFromDER(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ handler, err := deployer.GetTCertificateHandlerNext(attrs...)
+ if err != nil {
+ t.Fatalf("Failed getting handler: [%s]", err)
+ }
+
+ handler2, err := deployer.GetTCertificateHandlerFromDER(handler.GetCertificate())
+ if err != nil {
+ t.Fatalf("Failed getting tcert: [%s]", err)
+ }
+ if handler == nil {
+ t.Fatalf("Handler should be different from nil")
+ }
+ tCertDER := handler2.GetCertificate()
+ if tCertDER == nil {
+ t.Fatalf("TCert should be different from nil")
+ }
+ if len(tCertDER) == 0 {
+ t.Fatalf("TCert should have length > 0")
+ }
+
+ if !reflect.DeepEqual(handler.GetCertificate(), tCertDER) {
+ t.Fatalf("TCerts must be the same")
+ }
+}
+
+func TestClientTCertHandlerSign(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ handlerDeployer, err := deployer.GetTCertificateHandlerNext(attrs...)
+ if err != nil {
+ t.Fatalf("Failed getting handler: [%s]", err)
+ }
+
+ msg := []byte("Hello World!!!")
+ signature, err := handlerDeployer.Sign(msg)
+ if err != nil {
+ t.Fatalf("Failed getting tcert: [%s]", err)
+ }
+ if signature == nil || len(signature) == 0 {
+ t.Fatalf("Failed getting non-nil signature")
+ }
+
+ err = handlerDeployer.Verify(signature, msg)
+ if err != nil {
+ t.Fatalf("Failed verifying signature: [%s]", err)
+ }
+
+ // Check that deployer can reload the cert handler from DER and sign
+ handlerDeployer2, err := deployer.GetTCertificateHandlerFromDER(handlerDeployer.GetCertificate())
+ if err != nil {
+ t.Fatalf("Failed getting tcert: [%s]", err)
+ }
+
+ msg = []byte("Hello World!!!")
+ signature, err = handlerDeployer2.Sign(msg)
+ if err != nil {
+ t.Fatalf("Failed getting tcert: [%s]", err)
+ }
+ if signature == nil || len(signature) == 0 {
+ t.Fatalf("Failed getting non-nil signature")
+ }
+
+ err = handlerDeployer2.Verify(signature, msg)
+ if err != nil {
+ t.Fatalf("Failed verifying signature: [%s]", err)
+ }
+
+ // Check that invoker (another party) can verify the signature
+ handlerInvoker, err := invoker.GetTCertificateHandlerFromDER(handlerDeployer.GetCertificate())
+ if err != nil {
+ t.Fatalf("Failed getting tcert: [%s]", err)
+ }
+
+ err = handlerInvoker.Verify(signature, msg)
+ if err != nil {
+ t.Fatalf("Failed verifying signature: [%s]", err)
+ }
+
+ // Check that invoker cannot sign using a tcert obtained by the deployer
+ signature, err = handlerInvoker.Sign(msg)
+ if err == nil {
+ t.Fatalf("Bob should not be able to use Alice's tcert to sign")
+ }
+ if signature != nil {
+ t.Fatalf("Signature should be nil")
+ }
+}
+
+func TestClientGetEnrollmentCertHandler(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ handler, err := deployer.GetEnrollmentCertificateHandler()
+
+ if err != nil {
+ t.Fatalf("Failed getting handler: [%s]", err)
+ }
+ if handler == nil {
+ t.Fatalf("Handler should be different from nil")
+ }
+
+ certDER := handler.GetCertificate()
+
+ if certDER == nil {
+ t.Fatalf("Cert should be different from nil")
+ }
+ if len(certDER) == 0 {
+ t.Fatalf("Cert should have length > 0")
+ }
+}
+
+func TestClientGetEnrollmentCertHandlerSign(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ handlerDeployer, err := deployer.GetEnrollmentCertificateHandler()
+ if err != nil {
+ t.Fatalf("Failed getting handler: [%s]", err)
+ }
+
+ msg := []byte("Hello World!!!")
+ signature, err := handlerDeployer.Sign(msg)
+ if err != nil {
+ t.Fatalf("Failed getting tcert: [%s]", err)
+ }
+ if signature == nil || len(signature) == 0 {
+ t.Fatalf("Failed getting non-nil signature")
+ }
+
+ err = handlerDeployer.Verify(signature, msg)
+ if err != nil {
+ t.Fatalf("Failed verifying signature: [%s]", err)
+ }
+
+ // Check that invoker (another party) can verify the signature
+ handlerInvoker, err := invoker.GetEnrollmentCertificateHandler()
+ if err != nil {
+ t.Fatalf("Failed getting tcert: [%s]", err)
+ }
+
+ err = handlerInvoker.Verify(signature, msg)
+ if err == nil {
+ t.Fatalf("Failed verifying signature: [%s]", err)
+ }
+
+}
+
+func TestPeerID(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ // Verify that any id modification doesn't change
+ id := peer.GetID()
+
+ if id == nil {
+ t.Fatalf("Id is nil.")
+ }
+
+ if len(id) == 0 {
+ t.Fatalf("Id length is zero.")
+ }
+
+ id[0] = id[0] + 1
+ id2 := peer.GetID()
+ if id2[0] == id[0] {
+ t.Fatalf("Invariant not respected.")
+ }
+}
+
+func TestPeerDeployTransaction(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ for i, createTx := range deployTxCreators {
+ t.Logf("TestPeerDeployTransaction with [%d]\n", i)
+
+ _, tx, err := createTx(t)
+ if err != nil {
+ t.Fatalf("TransactionPreValidation: failed creating transaction [%s].", err)
+ }
+
+ res, err := peer.TransactionPreValidation(tx)
+ if err != nil {
+ t.Fatalf("Error must be nil [%s].", err)
+ }
+ if res == nil {
+ t.Fatalf("Result must be diffrent from nil")
+ }
+
+ res, err = peer.TransactionPreExecution(tx)
+ if err != utils.ErrNotImplemented {
+ t.Fatalf("Error must be ErrNotImplemented [%s].", err)
+ }
+ if res != nil {
+ t.Fatalf("Result must be nil")
+ }
+
+ // Test no Cert
+ oldCert := tx.Cert
+ tx.Cert = nil
+ _, err = peer.TransactionPreValidation(tx)
+ if err == nil {
+ t.Fatalf("Pre Validation should fail. No Cert. %s", err)
+ }
+ tx.Cert = oldCert
+
+ // Test no Signature
+ oldSig := tx.Signature
+ tx.Signature = nil
+ _, err = peer.TransactionPreValidation(tx)
+ if err == nil {
+ t.Fatalf("Pre Validation should fail. No Signature. %s", err)
+ }
+ tx.Signature = oldSig
+
+ // Test Invalid Cert
+ oldCert = tx.Cert
+ tx.Cert = []byte{0, 1, 2, 3, 4}
+ _, err = peer.TransactionPreValidation(tx)
+ if err == nil {
+ t.Fatalf("Pre Validation should fail. Invalid Cert. %s", err)
+ }
+ tx.Cert = oldCert
+
+ // Test self signed certificate Cert
+ oldCert = tx.Cert
+ rawSelfSignedCert, _, err := primitives.NewSelfSignedCert()
+ if err != nil {
+ t.Fatalf("Failed creating self signed cert [%s]", err)
+ }
+ tx.Cert = rawSelfSignedCert
+ _, err = peer.TransactionPreValidation(tx)
+ if err == nil {
+ t.Fatalf("Pre Validation should fail. Invalid Cert. %s", err)
+ }
+ tx.Cert = oldCert
+
+ // Test invalid Signature
+ oldSig = tx.Signature
+ tx.Signature = []byte{0, 1, 2, 3, 4}
+ _, err = peer.TransactionPreValidation(tx)
+ if err == nil {
+ t.Fatalf("Pre Validation should fail. Invalid Signature. %s", err)
+ }
+ tx.Signature = oldSig
+ }
+}
+
+func TestPeerExecuteTransaction(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ for i, createTx := range executeTxCreators {
+ t.Logf("TestPeerExecuteTransaction with [%d]\n", i)
+
+ _, tx, err := createTx(t)
+ if err != nil {
+ t.Fatalf("TransactionPreValidation: failed creating transaction [%s].", err)
+ }
+
+ res, err := peer.TransactionPreValidation(tx)
+ if err != nil {
+ t.Fatalf("Error must be nil [%s].", err)
+ }
+ if res == nil {
+ t.Fatalf("Result must be different from nil")
+ }
+
+ res, err = peer.TransactionPreExecution(tx)
+ if err != utils.ErrNotImplemented {
+ t.Fatalf("Error must be ErrNotImplemented [%s].", err)
+ }
+ if res != nil {
+ t.Fatalf("Result must be nil")
+ }
+ }
+}
+
+func TestPeerQueryTransaction(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ for i, createTx := range queryTxCreators {
+ t.Logf("TestPeerQueryTransaction with [%d]\n", i)
+
+ _, tx, err := createTx(t)
+ if err != nil {
+ t.Fatalf("Failed creating query transaction [%s].", err)
+ }
+
+ res, err := peer.TransactionPreValidation(tx)
+ if err != nil {
+ t.Fatalf("Error must be nil [%s].", err)
+ }
+ if res == nil {
+ t.Fatalf("Result must be different from nil")
+ }
+
+ res, err = peer.TransactionPreExecution(tx)
+ if err != utils.ErrNotImplemented {
+ t.Fatalf("Error must be ErrNotImplemented [%s].", err)
+ }
+ if res != nil {
+ t.Fatalf("Result must be nil")
+ }
+ }
+}
+
+func TestPeerStateEncryptor(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ _, deployTx, err := createConfidentialDeployTransaction(t)
+ if err != nil {
+ t.Fatalf("Failed creating deploy transaction [%s].", err)
+ }
+ _, invokeTxOne, err := createConfidentialExecuteTransaction(t)
+ if err != nil {
+ t.Fatalf("Failed creating invoke transaction [%s].", err)
+ }
+
+ res, err := peer.GetStateEncryptor(deployTx, invokeTxOne)
+ if err != utils.ErrNotImplemented {
+ t.Fatalf("Error must be ErrNotImplemented [%s].", err)
+ }
+ if res != nil {
+ t.Fatalf("Result must be nil")
+ }
+}
+
+func TestPeerSignVerify(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ msg := []byte("Hello World!!!")
+ signature, err := peer.Sign(msg)
+ if err != nil {
+ t.Fatalf("TestSign: failed generating signature [%s].", err)
+ }
+
+ err = peer.Verify(peer.GetID(), signature, msg)
+ if err != nil {
+ t.Fatalf("TestSign: failed validating signature [%s].", err)
+ }
+
+ signature, err = validator.Sign(msg)
+ if err != nil {
+ t.Fatalf("TestSign: failed generating signature [%s].", err)
+ }
+
+ err = peer.Verify(validator.GetID(), signature, msg)
+ if err != nil {
+ t.Fatalf("TestSign: failed validating signature [%s].", err)
+ }
+}
+
+func TestPeerVerify(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ msg := []byte("Hello World!!!")
+ signature, err := validator.Sign(msg)
+ if err != nil {
+ t.Fatalf("Failed generating signature [%s].", err)
+ }
+
+ err = peer.Verify(nil, signature, msg)
+ if err == nil {
+ t.Fatal("Verify should fail when given an empty id.", err)
+ }
+
+ err = peer.Verify(msg, signature, msg)
+ if err == nil {
+ t.Fatal("Verify should fail when given an invalid id.", err)
+ }
+
+ err = peer.Verify(validator.GetID(), nil, msg)
+ if err == nil {
+ t.Fatal("Verify should fail when given an invalid signature.", err)
+ }
+
+ err = peer.Verify(validator.GetID(), msg, msg)
+ if err == nil {
+ t.Fatal("Verify should fail when given an invalid signature.", err)
+ }
+
+ err = peer.Verify(validator.GetID(), signature, nil)
+ if err == nil {
+ t.Fatal("Verify should fail when given an invalid message.", err)
+ }
+}
+
+func TestValidatorID(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ // Verify that any id modification doesn't change
+ id := validator.GetID()
+
+ if id == nil {
+ t.Fatalf("Id is nil.")
+ }
+
+ if len(id) == 0 {
+ t.Fatalf("Id length is zero.")
+ }
+
+ id[0] = id[0] + 1
+ id2 := validator.GetID()
+ if id2[0] == id[0] {
+ t.Fatalf("Invariant not respected.")
+ }
+}
+
+func TestValidatorDeployTransaction(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ for i, createTx := range deployTxCreators {
+ t.Logf("TestValidatorDeployTransaction with [%d]\n", i)
+
+ otx, tx, err := createTx(t)
+ if err != nil {
+ t.Fatalf("Failed creating deploy transaction [%s].", err)
+ }
+
+ res, err := validator.TransactionPreValidation(tx)
+ if err != nil {
+ t.Fatalf("Error must be nil [%s].", err)
+ }
+ if res == nil {
+ t.Fatalf("Result must be different from nil")
+ }
+
+ res, err = validator.TransactionPreExecution(tx)
+ if err != nil {
+ t.Fatalf("Error must be nil [%s].", err)
+ }
+ if res == nil {
+ t.Fatalf("Result must be different from nil")
+ }
+
+ // Test invalid ConfidentialityLevel
+ oldConfidentialityLevel := tx.ConfidentialityLevel
+ tx.ConfidentialityLevel = -1
+ _, err = validator.TransactionPreExecution(tx)
+ if err == nil {
+ t.Fatalf("TransactionPreExecution should fail. Invalid ConfidentialityLevel. %s", err)
+ }
+ if err != utils.ErrInvalidConfidentialityLevel {
+ t.Fatalf("TransactionPreExecution should fail with ErrInvalidConfidentialityLevel rather than [%s]", err)
+ }
+ tx.ConfidentialityLevel = oldConfidentialityLevel
+
+ if tx.ConfidentialityLevel == obc.ConfidentialityLevel_CONFIDENTIAL {
+ if reflect.DeepEqual(res, tx) {
+ t.Fatalf("Src and Dest Transaction should be different after PreExecution")
+ }
+ if err := isEqual(otx, res); err != nil {
+ t.Fatalf("Decrypted transaction differs from the original: [%s]", err)
+ }
+
+ // Test no ToValidators
+ oldToValidators := tx.ToValidators
+ tx.ToValidators = nil
+ _, err = validator.TransactionPreExecution(tx)
+ if err == nil {
+ t.Fatalf("TransactionPreExecution should fail. No ToValidators. %s", err)
+ }
+ tx.ToValidators = oldToValidators
+
+ // Test invalid ToValidators
+ oldToValidators = tx.ToValidators
+ tx.ToValidators = []byte{0, 1, 2, 3, 4}
+ _, err = validator.TransactionPreExecution(tx)
+ if err == nil {
+ t.Fatalf("TransactionPreExecution should fail. Invalid ToValidators. %s", err)
+ }
+ tx.ToValidators = oldToValidators
+
+ // Test no Payload
+ oldPayload := tx.Payload
+ tx.Payload = nil
+ _, err = validator.TransactionPreExecution(tx)
+ if err == nil {
+ t.Fatalf("TransactionPreExecution should fail. No Payload. %s", err)
+ }
+ tx.Payload = oldPayload
+
+ // Test invalid Payload
+ oldPayload = tx.Payload
+ tx.Payload = []byte{0, 1, 2, 3, 4}
+ _, err = validator.TransactionPreExecution(tx)
+ if err == nil {
+ t.Fatalf("TransactionPreExecution should fail. Invalid Payload. %s", err)
+ }
+ tx.Payload = oldPayload
+
+ // Test no Payload
+ oldChaincodeID := tx.ChaincodeID
+ tx.ChaincodeID = nil
+ _, err = validator.TransactionPreExecution(tx)
+ if err == nil {
+ t.Fatalf("TransactionPreExecution should fail. No ChaincodeID. %s", err)
+ }
+ tx.ChaincodeID = oldChaincodeID
+
+ // Test invalid Payload
+ oldChaincodeID = tx.ChaincodeID
+ tx.ChaincodeID = []byte{0, 1, 2, 3, 4}
+ _, err = validator.TransactionPreExecution(tx)
+ if err == nil {
+ t.Fatalf("TransactionPreExecution should fail. Invalid ChaincodeID. %s", err)
+ }
+ tx.ChaincodeID = oldChaincodeID
+ }
+ }
+}
+
+func TestValidatorExecuteTransaction(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ for i, createTx := range executeTxCreators {
+ t.Logf("TestValidatorExecuteTransaction with [%d]\n", i)
+
+ otx, tx, err := createTx(t)
+ if err != nil {
+ t.Fatalf("Failed creating execute transaction [%s].", err)
+ }
+
+ res, err := validator.TransactionPreValidation(tx)
+ if err != nil {
+ t.Fatalf("Error must be nil [%s].", err)
+ }
+ if res == nil {
+ t.Fatalf("Result must be different from nil")
+ }
+
+ res, err = validator.TransactionPreExecution(tx)
+ if err != nil {
+ t.Fatalf("Error must be nil [%s].", err)
+ }
+ if res == nil {
+ t.Fatalf("Result must be different from nil")
+ }
+
+ if tx.ConfidentialityLevel == obc.ConfidentialityLevel_CONFIDENTIAL {
+ if reflect.DeepEqual(res, tx) {
+ t.Fatalf("Src and Dest Transaction should be different after PreExecution")
+ }
+ if err := isEqual(otx, res); err != nil {
+ t.Fatalf("Decrypted transaction differs from the original: [%s]", err)
+ }
+ }
+ }
+}
+
+func TestValidatorQueryTransaction(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ for i, createTx := range queryTxCreators {
+ t.Logf("TestValidatorQueryTransaction with [%d]\n", i)
+
+ _, deployTx, err := deployTxCreators[i](t)
+ if err != nil {
+ t.Fatalf("Failed creating deploy transaction [%s].", err)
+ }
+ _, invokeTxOne, err := executeTxCreators[i](t)
+ if err != nil {
+ t.Fatalf("Failed creating invoke transaction [%s].", err)
+ }
+ _, invokeTxTwo, err := executeTxCreators[i](t)
+ if err != nil {
+ t.Fatalf("Failed creating invoke transaction [%s].", err)
+ }
+ otx, queryTx, err := createTx(t)
+ if err != nil {
+ t.Fatalf("Failed creating query transaction [%s].", err)
+ }
+
+ if queryTx.ConfidentialityLevel == obc.ConfidentialityLevel_CONFIDENTIAL {
+
+ // Transactions must be PreExecuted by the validators before getting the StateEncryptor
+ if _, err = validator.TransactionPreValidation(deployTx); err != nil {
+ t.Fatalf("Failed pre-validating deploy transaction [%s].", err)
+ }
+ if deployTx, err = validator.TransactionPreExecution(deployTx); err != nil {
+ t.Fatalf("Failed pre-executing deploy transaction [%s].", err)
+ }
+ if _, err = validator.TransactionPreValidation(invokeTxOne); err != nil {
+ t.Fatalf("Failed pre-validating exec1 transaction [%s].", err)
+ }
+ if invokeTxOne, err = validator.TransactionPreExecution(invokeTxOne); err != nil {
+ t.Fatalf("Failed pre-executing exec1 transaction [%s].", err)
+ }
+ if _, err = validator.TransactionPreValidation(invokeTxTwo); err != nil {
+ t.Fatalf("Failed pre-validating exec2 transaction [%s].", err)
+ }
+ if invokeTxTwo, err = validator.TransactionPreExecution(invokeTxTwo); err != nil {
+ t.Fatalf("Failed pre-executing exec2 transaction [%s].", err)
+ }
+ if _, err = validator.TransactionPreValidation(queryTx); err != nil {
+ t.Fatalf("Failed pre-validating query transaction [%s].", err)
+ }
+ if queryTx, err = validator.TransactionPreExecution(queryTx); err != nil {
+ t.Fatalf("Failed pre-executing query transaction [%s].", err)
+ }
+ if err := isEqual(otx, queryTx); err != nil {
+ t.Fatalf("Decrypted transaction differs from the original: [%s]", err)
+ }
+
+ // First invokeTx
+ seOne, err := validator.GetStateEncryptor(deployTx, invokeTxOne)
+ if err != nil {
+ t.Fatalf("Failed creating state encryptor [%s].", err)
+ }
+ pt := []byte("Hello World")
+ aCt, err := seOne.Encrypt(pt)
+ if err != nil {
+ t.Fatalf("Failed encrypting state [%s].", err)
+ }
+ aPt, err := seOne.Decrypt(aCt)
+ if err != nil {
+ t.Fatalf("Failed decrypting state [%s].", err)
+ }
+ if !bytes.Equal(pt, aPt) {
+ t.Fatalf("Failed decrypting state [%s != %s]: %s", string(pt), string(aPt), err)
+ }
+ // Try to decrypt nil. It should return nil with no error
+ out, err := seOne.Decrypt(nil)
+ if err != nil {
+ t.Fatal("Decrypt should not fail on nil input")
+ }
+ if out != nil {
+ t.Fatal("Nil input should decrypt to nil")
+ }
+
+ // Second invokeTx
+ seTwo, err := validator.GetStateEncryptor(deployTx, invokeTxTwo)
+ if err != nil {
+ t.Fatalf("Failed creating state encryptor [%s].", err)
+ }
+ aPt2, err := seTwo.Decrypt(aCt)
+ if err != nil {
+ t.Fatalf("Failed decrypting state [%s].", err)
+ }
+ if !bytes.Equal(pt, aPt2) {
+ t.Fatalf("Failed decrypting state [%s != %s]: %s", string(pt), string(aPt), err)
+ }
+ // Reencrypt the state
+ aCt, err = seTwo.Encrypt(pt)
+ if err != nil {
+ t.Fatalf("Failed encrypting state [%s].", err)
+ }
+
+ // Try to decrypt nil. It should return nil with no error
+ out, err = seTwo.Decrypt(nil)
+ if err != nil {
+ t.Fatal("Decrypt should not fail on nil input")
+ }
+ if out != nil {
+ t.Fatal("Nil input should decrypt to nil")
+ }
+
+ // queryTx
+ seThree, err := validator.GetStateEncryptor(deployTx, queryTx)
+ aPt2, err = seThree.Decrypt(aCt)
+ if err != nil {
+ t.Fatalf("Failed decrypting state [%s].", err)
+ }
+ if !bytes.Equal(pt, aPt2) {
+ t.Fatalf("Failed decrypting state [%s != %s]: %s", string(pt), string(aPt), err)
+ }
+
+ ctQ, err := seThree.Encrypt(aPt2)
+ if err != nil {
+ t.Fatalf("Failed encrypting query result [%s].", err)
+ }
+ aPt3, err := invoker.DecryptQueryResult(queryTx, ctQ)
+ if err != nil {
+ t.Fatalf("Failed decrypting query result [%s].", err)
+ }
+ if !bytes.Equal(aPt2, aPt3) {
+ t.Fatalf("Failed decrypting query result [%s != %s]: %s", string(aPt2), string(aPt3), err)
+ }
+ }
+ }
+}
+
+func TestValidatorStateEncryptor(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ _, deployTx, err := createConfidentialDeployTransaction(t)
+ if err != nil {
+ t.Fatalf("Failed creating deploy transaction [%s]", err)
+ }
+ _, invokeTxOne, err := createConfidentialExecuteTransaction(t)
+ if err != nil {
+ t.Fatalf("Failed creating invoke transaction [%s]", err)
+ }
+ _, invokeTxTwo, err := createConfidentialExecuteTransaction(t)
+ if err != nil {
+ t.Fatalf("Failed creating invoke transaction [%s]", err)
+ }
+
+ // Transactions must be PreExecuted by the validators before getting the StateEncryptor
+ if _, err = validator.TransactionPreValidation(deployTx); err != nil {
+ t.Fatalf("Failed pre-validating deploy transaction [%s].", err)
+ }
+ if deployTx, err = validator.TransactionPreExecution(deployTx); err != nil {
+ t.Fatalf("Failed pre-executing deploy transaction [%s].", err)
+ }
+ if _, err = validator.TransactionPreValidation(invokeTxOne); err != nil {
+ t.Fatalf("Failed pre-validating exec1 transaction [%s].", err)
+ }
+ if invokeTxOne, err = validator.TransactionPreExecution(invokeTxOne); err != nil {
+ t.Fatalf("Failed pre-executing exec1 transaction [%s].", err)
+ }
+ if _, err = validator.TransactionPreValidation(invokeTxTwo); err != nil {
+ t.Fatalf("Failed pre-validating exec2 transaction [%s].", err)
+ }
+ if invokeTxTwo, err = validator.TransactionPreExecution(invokeTxTwo); err != nil {
+ t.Fatalf("Failed pre-executing exec2 transaction [%s].", err)
+ }
+
+ seOne, err := validator.GetStateEncryptor(deployTx, invokeTxOne)
+ if err != nil {
+ t.Fatalf("Failed creating state encryptor [%s].", err)
+ }
+ pt := []byte("Hello World")
+ aCt, err := seOne.Encrypt(pt)
+ if err != nil {
+ t.Fatalf("Failed encrypting state [%s].", err)
+ }
+ aPt, err := seOne.Decrypt(aCt)
+ if err != nil {
+ t.Fatalf("Failed decrypting state [%s].", err)
+ }
+ if !bytes.Equal(pt, aPt) {
+ t.Fatalf("Failed decrypting state [%s != %s]: %s", string(pt), string(aPt), err)
+ }
+
+ // Try to decrypt nil. It should return nil with no error
+ out, err := seOne.Decrypt(nil)
+ if err != nil {
+ t.Fatal("Decrypt should not fail on nil input")
+ }
+ if out != nil {
+ t.Fatal("Nil input should decrypt to nil")
+ }
+
+ seTwo, err := validator.GetStateEncryptor(deployTx, invokeTxTwo)
+ if err != nil {
+ t.Fatalf("Failed creating state encryptor [%s].", err)
+ }
+ aPt2, err := seTwo.Decrypt(aCt)
+ if err != nil {
+ t.Fatalf("Failed decrypting state [%s].", err)
+ }
+ if !bytes.Equal(pt, aPt2) {
+ t.Fatalf("Failed decrypting state [%s != %s]: %s", string(pt), string(aPt), err)
+ }
+
+ // Try to decrypt nil. It should return nil with no error
+ out, err = seTwo.Decrypt(nil)
+ if err != nil {
+ t.Fatal("Decrypt should not fail on nil input")
+ }
+ if out != nil {
+ t.Fatal("Nil input should decrypt to nil")
+ }
+
+}
+
+func TestValidatorSignVerify(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ msg := []byte("Hello World!!!")
+ signature, err := validator.Sign(msg)
+ if err != nil {
+ t.Fatalf("TestSign: failed generating signature [%s].", err)
+ }
+
+ err = validator.Verify(validator.GetID(), signature, msg)
+ if err != nil {
+ t.Fatalf("TestSign: failed validating signature [%s].", err)
+ }
+}
+
+func TestValidatorVerify(t *testing.T) {
+ initNodes()
+ defer closeNodes()
+
+ msg := []byte("Hello World!!!")
+ signature, err := validator.Sign(msg)
+ if err != nil {
+ t.Fatalf("Failed generating signature [%s].", err)
+ }
+
+ err = validator.Verify(nil, signature, msg)
+ if err == nil {
+ t.Fatal("Verify should fail when given an empty id.", err)
+ }
+
+ err = validator.Verify(msg, signature, msg)
+ if err == nil {
+ t.Fatal("Verify should fail when given an invalid id.", err)
+ }
+
+ err = validator.Verify(validator.GetID(), nil, msg)
+ if err == nil {
+ t.Fatal("Verify should fail when given an invalid signature.", err)
+ }
+
+ err = validator.Verify(validator.GetID(), msg, msg)
+ if err == nil {
+ t.Fatal("Verify should fail when given an invalid signature.", err)
+ }
+
+ err = validator.Verify(validator.GetID(), signature, nil)
+ if err == nil {
+ t.Fatal("Verify should fail when given an invalid message.", err)
+ }
+}
+
+func BenchmarkTransactionCreation(b *testing.B) {
+ initNodes()
+ defer closeNodes()
+
+ b.StopTimer()
+ b.ResetTimer()
+ cis := &obc.ChaincodeInvocationSpec{
+ ChaincodeSpec: &obc.ChaincodeSpec{
+ Type: obc.ChaincodeSpec_GOLANG,
+ ChaincodeID: &obc.ChaincodeID{Path: "Contract001"},
+ CtorMsg: nil,
+ ConfidentialityLevel: obc.ConfidentialityLevel_CONFIDENTIAL,
+ },
+ }
+ invoker.GetTCertificateHandlerNext(attrs...)
+
+ for i := 0; i < b.N; i++ {
+ uuid := util.GenerateUUID()
+ b.StartTimer()
+ invoker.NewChaincodeExecute(cis, uuid, attrs...)
+ b.StopTimer()
+ }
+}
+
+func BenchmarkTransactionValidation(b *testing.B) {
+ initNodes()
+ defer closeNodes()
+
+ b.StopTimer()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, tx, _ := createConfidentialTCertHExecuteTransaction(nil)
+
+ b.StartTimer()
+ validator.TransactionPreValidation(tx)
+ validator.TransactionPreExecution(tx)
+ b.StopTimer()
+ }
+}
+
+func BenchmarkSign(b *testing.B) {
+ b.StopTimer()
+ b.ResetTimer()
+
+ //b.Logf("#iterations %d\n", b.N)
+ signKey, _ := primitives.NewECDSAKey()
+ hash := make([]byte, 48)
+
+ for i := 0; i < b.N; i++ {
+ rand.Read(hash)
+ b.StartTimer()
+ primitives.ECDSASign(signKey, hash)
+ b.StopTimer()
+ }
+}
+
+func BenchmarkVerify(b *testing.B) {
+ b.StopTimer()
+ b.ResetTimer()
+
+ //b.Logf("#iterations %d\n", b.N)
+ signKey, _ := primitives.NewECDSAKey()
+ verKey := signKey.PublicKey
+ hash := make([]byte, 48)
+
+ for i := 0; i < b.N; i++ {
+ rand.Read(hash)
+ sigma, _ := primitives.ECDSASign(signKey, hash)
+ b.StartTimer()
+ primitives.ECDSAVerify(&verKey, hash, sigma)
+ b.StopTimer()
+ }
+}
+
+func setup() {
+ // Conf
+ viper.SetConfigName("crypto_test") // name of config file (without extension)
+ viper.AddConfigPath(".") // path to look for the config file in
+ err := viper.ReadInConfig() // Find and read the config file
+ if err != nil { // Handle errors reading the config file
+ panic(fmt.Errorf("Fatal error config file [%s] \n", err))
+ }
+
+ // Set Default properties
+ viper.Set("peer.fileSystemPath", filepath.Join(os.TempDir(), "obc-crypto-tests", "peers"))
+ viper.Set("server.rootpath", filepath.Join(os.TempDir(), "obc-crypto-tests", "ca"))
+ viper.Set("peer.pki.tls.rootcert.file", filepath.Join(os.TempDir(), "obc-crypto-tests", "ca", "tlsca.cert"))
+
+ // Logging
+ var formatter = logging.MustStringFormatter(
+ `%{color}[%{module}] %{shortfunc} [%{shortfile}] -> %{level:.4s} %{id:03x}%{color:reset} %{message}`,
+ )
+ logging.SetFormatter(formatter)
+
+ // TX creators
+ deployTxCreators = []createTxFunc{
+ createPublicDeployTransaction,
+ createConfidentialDeployTransaction,
+ createConfidentialTCertHDeployTransaction,
+ createConfidentialECertHDeployTransaction,
+ }
+ executeTxCreators = []createTxFunc{
+ createPublicExecuteTransaction,
+ createConfidentialExecuteTransaction,
+ createConfidentialTCertHExecuteTransaction,
+ createConfidentialECertHExecuteTransaction,
+ }
+ queryTxCreators = []createTxFunc{
+ createPublicQueryTransaction,
+ createConfidentialQueryTransaction,
+ createConfidentialTCertHQueryTransaction,
+ createConfidentialECertHQueryTransaction,
+ }
+
+ // Init crypto layer
+ Init()
+
+ // Cleanup folders
+ removeFolders()
+}
+
+func initPKI() {
+ ca.LogInit(ioutil.Discard, os.Stdout, os.Stdout, os.Stderr, os.Stdout)
+ ca.CacheConfiguration() // Need cache the configuration first
+ aca = ca.NewACA()
+ eca = ca.NewECA()
+ tca = ca.NewTCA(eca)
+ tlsca = ca.NewTLSCA(eca)
+}
+
+func startPKI() {
+ var opts []grpc.ServerOption
+ if viper.GetBool("peer.pki.tls.enabled") {
+ // TLS configuration
+ creds, err := credentials.NewServerTLSFromFile(
+ filepath.Join(viper.GetString("server.rootpath"), "tlsca.cert"),
+ filepath.Join(viper.GetString("server.rootpath"), "tlsca.priv"),
+ )
+ if err != nil {
+ panic("Failed creating credentials for OBC-CA: " + err.Error())
+ }
+ opts = []grpc.ServerOption{grpc.Creds(creds)}
+ }
+
+ fmt.Printf("open socket...\n")
+ sockp, err := net.Listen("tcp", viper.GetString("server.port"))
+ if err != nil {
+ panic("Cannot open port: " + err.Error())
+ }
+ fmt.Printf("open socket...done\n")
+
+ server = grpc.NewServer(opts...)
+ aca.Start(server)
+ eca.Start(server)
+ tca.Start(server)
+ tlsca.Start(server)
+
+ fmt.Printf("start serving...\n")
+ server.Serve(sockp)
+}
+
+func initNodes() {
+ // Init clients
+ err := initClients()
+ if err != nil {
+ fmt.Printf("Failed initializing clients [%s]\n", err)
+ panic(fmt.Errorf("Failed initializing clients [%s].", err))
+ }
+
+ // Init peer
+ err = initPeers()
+ if err != nil {
+ fmt.Printf("Failed initializing peers [%s]\n", err)
+ panic(fmt.Errorf("Failed initializing peers [%s].", err))
+ }
+
+ // Init validators
+ err = initValidators()
+ if err != nil {
+ fmt.Printf("Failed initializing validators [%s]\n", err)
+ panic(fmt.Errorf("Failed initializing validators [%s].", err))
+ }
+
+}
+
+func closeNodes() {
+ ok, errs := CloseAllClients()
+ if !ok {
+ for _, err := range errs {
+ log.Errorf("Failed closing clients [%s]", err)
+ }
+ }
+ ok, errs = CloseAllPeers()
+ if !ok {
+ for _, err := range errs {
+ log.Errorf("Failed closing clients [%s]", err)
+ }
+ }
+ ok, errs = CloseAllValidators()
+}
+
+func initClients() error {
+ // Deployer
+ deployerConf := utils.NodeConfiguration{Type: "client", Name: "user1"}
+ if err := RegisterClient(deployerConf.Name, ksPwd, deployerConf.GetEnrollmentID(), deployerConf.GetEnrollmentPWD()); err != nil {
+ return err
+ }
+ var err error
+ deployer, err = InitClient(deployerConf.Name, ksPwd)
+ if err != nil {
+ return err
+ }
+
+ // Invoker
+ invokerConf := utils.NodeConfiguration{Type: "client", Name: "user2"}
+ if err = RegisterClient(invokerConf.Name, ksPwd, invokerConf.GetEnrollmentID(), invokerConf.GetEnrollmentPWD()); err != nil {
+ return err
+ }
+ invoker, err = InitClient(invokerConf.Name, ksPwd)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func initPeers() error {
+ // Register
+ conf := utils.NodeConfiguration{Type: "peer", Name: "peer"}
+ err := RegisterPeer(conf.Name, ksPwd, conf.GetEnrollmentID(), conf.GetEnrollmentPWD())
+ if err != nil {
+ return err
+ }
+
+ // Verify that a second call to Register fails
+ err = RegisterPeer(conf.Name, ksPwd, conf.GetEnrollmentID(), conf.GetEnrollmentPWD())
+ if err != nil {
+ return err
+ }
+
+ // Init
+ peer, err = InitPeer(conf.Name, ksPwd)
+ if err != nil {
+ return err
+ }
+
+ err = RegisterPeer(conf.Name, ksPwd, conf.GetEnrollmentID(), conf.GetEnrollmentPWD())
+ if err != nil {
+ return err
+ }
+
+ return err
+}
+
+func initValidators() error {
+ // Register
+ conf := utils.NodeConfiguration{Type: "validator", Name: "validator"}
+ err := RegisterValidator(conf.Name, ksPwd, conf.GetEnrollmentID(), conf.GetEnrollmentPWD())
+ if err != nil {
+ return err
+ }
+
+ // Verify that a second call to Register fails
+ err = RegisterValidator(conf.Name, ksPwd, conf.GetEnrollmentID(), conf.GetEnrollmentPWD())
+ if err != nil {
+ return err
+ }
+
+ // Init
+ validator, err = InitValidator(conf.Name, ksPwd)
+ if err != nil {
+ return err
+ }
+
+ err = RegisterValidator(conf.Name, ksPwd, conf.GetEnrollmentID(), conf.GetEnrollmentPWD())
+ if err != nil {
+ return err
+ }
+
+ return err
+}
+
+func createConfidentialDeployTransaction(t *testing.T) (*obc.Transaction, *obc.Transaction, error) {
+ uuid := util.GenerateUUID()
+
+ cds := &obc.ChaincodeDeploymentSpec{
+ ChaincodeSpec: &obc.ChaincodeSpec{
+ Type: obc.ChaincodeSpec_GOLANG,
+ ChaincodeID: &obc.ChaincodeID{Path: "Contract001"},
+ CtorMsg: nil,
+ ConfidentialityLevel: obc.ConfidentialityLevel_CONFIDENTIAL,
+ Metadata: []byte("Hello World"),
+ },
+ EffectiveDate: nil,
+ CodePackage: nil,
+ }
+
+ otx, err := obc.NewChaincodeDeployTransaction(cds, uuid)
+ otx.Metadata = cds.ChaincodeSpec.Metadata
+ if err != nil {
+ return nil, nil, err
+ }
+ tx, err := deployer.NewChaincodeDeployTransaction(cds, uuid, attrs...)
+ return otx, tx, err
+}
+
+func createConfidentialExecuteTransaction(t *testing.T) (*obc.Transaction, *obc.Transaction, error) {
+ uuid := util.GenerateUUID()
+
+ cis := &obc.ChaincodeInvocationSpec{
+ ChaincodeSpec: &obc.ChaincodeSpec{
+ Type: obc.ChaincodeSpec_GOLANG,
+ ChaincodeID: &obc.ChaincodeID{Path: "Contract001"},
+ CtorMsg: nil,
+ ConfidentialityLevel: obc.ConfidentialityLevel_CONFIDENTIAL,
+ Metadata: []byte("Hello World"),
+ },
+ }
+
+ otx, err := obc.NewChaincodeExecute(cis, uuid, obc.Transaction_CHAINCODE_INVOKE)
+ otx.Metadata = cis.ChaincodeSpec.Metadata
+ if err != nil {
+ return nil, nil, err
+ }
+ tx, err := invoker.NewChaincodeExecute(cis, uuid, attrs...)
+ return otx, tx, err
+}
+
+func createConfidentialQueryTransaction(t *testing.T) (*obc.Transaction, *obc.Transaction, error) {
+ uuid := util.GenerateUUID()
+
+ cis := &obc.ChaincodeInvocationSpec{
+ ChaincodeSpec: &obc.ChaincodeSpec{
+ Type: obc.ChaincodeSpec_GOLANG,
+ ChaincodeID: &obc.ChaincodeID{Path: "Contract001"},
+ CtorMsg: nil,
+ ConfidentialityLevel: obc.ConfidentialityLevel_CONFIDENTIAL,
+ Metadata: []byte("Hello World"),
+ },
+ }
+
+ otx, err := obc.NewChaincodeExecute(cis, uuid, obc.Transaction_CHAINCODE_QUERY)
+ otx.Metadata = cis.ChaincodeSpec.Metadata
+ if err != nil {
+ return nil, nil, err
+ }
+ tx, err := invoker.NewChaincodeQuery(cis, uuid, attrs...)
+ return otx, tx, err
+}
+
+func createConfidentialTCertHDeployTransaction(t *testing.T) (*obc.Transaction, *obc.Transaction, error) {
+ uuid := util.GenerateUUID()
+
+ cds := &obc.ChaincodeDeploymentSpec{
+ ChaincodeSpec: &obc.ChaincodeSpec{
+ Type: obc.ChaincodeSpec_GOLANG,
+ ChaincodeID: &obc.ChaincodeID{Path: "Contract001"},
+ CtorMsg: nil,
+ ConfidentialityLevel: obc.ConfidentialityLevel_CONFIDENTIAL,
+ },
+ EffectiveDate: nil,
+ CodePackage: nil,
+ }
+
+ otx, err := obc.NewChaincodeDeployTransaction(cds, uuid)
+ if err != nil {
+ return nil, nil, err
+ }
+ handler, err := deployer.GetTCertificateHandlerNext(attrs...)
+ if err != nil {
+ return nil, nil, err
+ }
+ txHandler, err := handler.GetTransactionHandler()
+ if err != nil {
+ return nil, nil, err
+ }
+ tx, err := txHandler.NewChaincodeDeployTransaction(cds, uuid, attrs...)
+
+ // Check binding consistency
+ binding, err := txHandler.GetBinding()
+ if err != nil {
+ t.Fatal("Failed getting binding from transaction handler.")
+ }
+
+ txBinding, err := validator.GetTransactionBinding(tx)
+ if err != nil {
+ t.Fatal("Failed getting transaction binding.")
+ }
+
+ if !reflect.DeepEqual(binding, txBinding) {
+ t.Fatal("Binding is malformed!")
+ }
+
+ // Check confidentiality level
+ if tx.ConfidentialityLevel != cds.ChaincodeSpec.ConfidentialityLevel {
+ t.Fatal("Failed setting confidentiality level")
+ }
+
+ // Check metadata
+ if !reflect.DeepEqual(cds.ChaincodeSpec.Metadata, tx.Metadata) {
+ t.Fatal("Failed copying metadata")
+ }
+
+ return otx, tx, err
+}
+
+func createConfidentialTCertHExecuteTransaction(t *testing.T) (*obc.Transaction, *obc.Transaction, error) {
+ uuid := util.GenerateUUID()
+
+ cis := &obc.ChaincodeInvocationSpec{
+ ChaincodeSpec: &obc.ChaincodeSpec{
+ Type: obc.ChaincodeSpec_GOLANG,
+ ChaincodeID: &obc.ChaincodeID{Path: "Contract001"},
+ CtorMsg: nil,
+ ConfidentialityLevel: obc.ConfidentialityLevel_CONFIDENTIAL,
+ },
+ }
+
+ otx, err := obc.NewChaincodeExecute(cis, uuid, obc.Transaction_CHAINCODE_INVOKE)
+ if err != nil {
+ return nil, nil, err
+ }
+ handler, err := invoker.GetTCertificateHandlerNext(attrs...)
+
+ if err != nil {
+ return nil, nil, err
+ }
+ txHandler, err := handler.GetTransactionHandler()
+ if err != nil {
+ return nil, nil, err
+ }
+ tx, err := txHandler.NewChaincodeExecute(cis, uuid, attrs...)
+
+ // Check binding consistency
+ binding, _ := txHandler.GetBinding()
+ if !reflect.DeepEqual(binding, primitives.Hash(append(handler.GetCertificate(), tx.Nonce...))) {
+ t.Fatal("Binding is malformed!")
+ }
+
+ // Check confidentiality level
+ if tx.ConfidentialityLevel != cis.ChaincodeSpec.ConfidentialityLevel {
+ t.Fatal("Failed setting confidentiality level")
+ }
+
+ // Check metadata
+ if !reflect.DeepEqual(cis.ChaincodeSpec.Metadata, tx.Metadata) {
+ t.Fatal("Failed copying metadata")
+ }
+
+ return otx, tx, err
+}
+
+func createConfidentialTCertHQueryTransaction(t *testing.T) (*obc.Transaction, *obc.Transaction, error) {
+ uuid := util.GenerateUUID()
+
+ cis := &obc.ChaincodeInvocationSpec{
+ ChaincodeSpec: &obc.ChaincodeSpec{
+ Type: obc.ChaincodeSpec_GOLANG,
+ ChaincodeID: &obc.ChaincodeID{Path: "Contract001"},
+ CtorMsg: nil,
+ ConfidentialityLevel: obc.ConfidentialityLevel_CONFIDENTIAL,
+ },
+ }
+
+ otx, err := obc.NewChaincodeExecute(cis, uuid, obc.Transaction_CHAINCODE_QUERY)
+ if err != nil {
+ return nil, nil, err
+ }
+ handler, err := invoker.GetTCertificateHandlerNext(attrs...)
+ if err != nil {
+ return nil, nil, err
+ }
+ txHandler, err := handler.GetTransactionHandler()
+ if err != nil {
+ return nil, nil, err
+ }
+ tx, err := txHandler.NewChaincodeQuery(cis, uuid, attrs...)
+
+ // Check binding consistency
+ binding, _ := txHandler.GetBinding()
+ if !reflect.DeepEqual(binding, primitives.Hash(append(handler.GetCertificate(), tx.Nonce...))) {
+ t.Fatal("Binding is malformed!")
+ }
+
+ // Check confidentiality level
+ if tx.ConfidentialityLevel != cis.ChaincodeSpec.ConfidentialityLevel {
+ t.Fatal("Failed setting confidentiality level")
+ }
+
+ // Check metadata
+ if !reflect.DeepEqual(cis.ChaincodeSpec.Metadata, tx.Metadata) {
+ t.Fatal("Failed copying metadata")
+ }
+
+ return otx, tx, err
+}
+
+func createConfidentialECertHDeployTransaction(t *testing.T) (*obc.Transaction, *obc.Transaction, error) {
+ uuid := util.GenerateUUID()
+
+ cds := &obc.ChaincodeDeploymentSpec{
+ ChaincodeSpec: &obc.ChaincodeSpec{
+ Type: obc.ChaincodeSpec_GOLANG,
+ ChaincodeID: &obc.ChaincodeID{Path: "Contract001"},
+ CtorMsg: nil,
+ ConfidentialityLevel: obc.ConfidentialityLevel_CONFIDENTIAL,
+ },
+ EffectiveDate: nil,
+ CodePackage: nil,
+ }
+
+ otx, err := obc.NewChaincodeDeployTransaction(cds, uuid)
+ if err != nil {
+ return nil, nil, err
+ }
+ handler, err := deployer.GetEnrollmentCertificateHandler()
+ if err != nil {
+ return nil, nil, err
+ }
+ txHandler, err := handler.GetTransactionHandler()
+ if err != nil {
+ return nil, nil, err
+ }
+ tx, err := txHandler.NewChaincodeDeployTransaction(cds, uuid, attrs...)
+
+ // Check binding consistency
+ binding, _ := txHandler.GetBinding()
+ if !reflect.DeepEqual(binding, primitives.Hash(append(handler.GetCertificate(), tx.Nonce...))) {
+ t.Fatal("Binding is malformed!")
+ }
+
+ // Check confidentiality level
+ if tx.ConfidentialityLevel != cds.ChaincodeSpec.ConfidentialityLevel {
+ t.Fatal("Failed setting confidentiality level")
+ }
+
+ // Check metadata
+ if !reflect.DeepEqual(cds.ChaincodeSpec.Metadata, tx.Metadata) {
+ t.Fatal("Failed copying metadata")
+ }
+
+ return otx, tx, err
+}
+
+func createConfidentialECertHExecuteTransaction(t *testing.T) (*obc.Transaction, *obc.Transaction, error) {
+ uuid := util.GenerateUUID()
+
+ cis := &obc.ChaincodeInvocationSpec{
+ ChaincodeSpec: &obc.ChaincodeSpec{
+ Type: obc.ChaincodeSpec_GOLANG,
+ ChaincodeID: &obc.ChaincodeID{Path: "Contract001"},
+ CtorMsg: nil,
+ ConfidentialityLevel: obc.ConfidentialityLevel_CONFIDENTIAL,
+ },
+ }
+
+ otx, err := obc.NewChaincodeExecute(cis, uuid, obc.Transaction_CHAINCODE_INVOKE)
+ if err != nil {
+ return nil, nil, err
+ }
+ handler, err := invoker.GetEnrollmentCertificateHandler()
+ if err != nil {
+ return nil, nil, err
+ }
+ txHandler, err := handler.GetTransactionHandler()
+ if err != nil {
+ return nil, nil, err
+ }
+ tx, err := txHandler.NewChaincodeExecute(cis, uuid, attrs...)
+ // Check binding consistency
+ binding, _ := txHandler.GetBinding()
+ if !reflect.DeepEqual(binding, primitives.Hash(append(handler.GetCertificate(), tx.Nonce...))) {
+ t.Fatal("Binding is malformed!")
+ }
+
+ // Check confidentiality level
+ if tx.ConfidentialityLevel != cis.ChaincodeSpec.ConfidentialityLevel {
+ t.Fatal("Failed setting confidentiality level")
+ }
+
+ // Check metadata
+ if !reflect.DeepEqual(cis.ChaincodeSpec.Metadata, tx.Metadata) {
+ t.Fatal("Failed copying metadata")
+ }
+
+ return otx, tx, err
+}
+
+func createConfidentialECertHQueryTransaction(t *testing.T) (*obc.Transaction, *obc.Transaction, error) {
+ uuid := util.GenerateUUID()
+
+ cis := &obc.ChaincodeInvocationSpec{
+ ChaincodeSpec: &obc.ChaincodeSpec{
+ Type: obc.ChaincodeSpec_GOLANG,
+ ChaincodeID: &obc.ChaincodeID{Path: "Contract001"},
+ CtorMsg: nil,
+ ConfidentialityLevel: obc.ConfidentialityLevel_CONFIDENTIAL,
+ },
+ }
+
+ otx, err := obc.NewChaincodeExecute(cis, uuid, obc.Transaction_CHAINCODE_QUERY)
+ if err != nil {
+ return nil, nil, err
+ }
+ handler, err := invoker.GetEnrollmentCertificateHandler()
+ if err != nil {
+ return nil, nil, err
+ }
+ txHandler, err := handler.GetTransactionHandler()
+ if err != nil {
+ return nil, nil, err
+ }
+ tx, err := txHandler.NewChaincodeQuery(cis, uuid, attrs...)
+ // Check binding consistency
+ binding, _ := txHandler.GetBinding()
+ if !reflect.DeepEqual(binding, primitives.Hash(append(handler.GetCertificate(), tx.Nonce...))) {
+ t.Fatal("Binding is malformed!")
+ }
+
+ // Check confidentiality level
+ if tx.ConfidentialityLevel != cis.ChaincodeSpec.ConfidentialityLevel {
+ t.Fatal("Failed setting confidentiality level")
+ }
+
+ // Check metadata
+ if !reflect.DeepEqual(cis.ChaincodeSpec.Metadata, tx.Metadata) {
+ t.Fatal("Failed copying metadata")
+ }
+
+ return otx, tx, err
+}
+
+func createPublicDeployTransaction(t *testing.T) (*obc.Transaction, *obc.Transaction, error) {
+ uuid := util.GenerateUUID()
+
+ cds := &obc.ChaincodeDeploymentSpec{
+ ChaincodeSpec: &obc.ChaincodeSpec{
+ Type: obc.ChaincodeSpec_GOLANG,
+ ChaincodeID: &obc.ChaincodeID{Path: "Contract001"},
+ CtorMsg: nil,
+ ConfidentialityLevel: obc.ConfidentialityLevel_PUBLIC,
+ },
+ EffectiveDate: nil,
+ CodePackage: nil,
+ }
+
+ otx, err := obc.NewChaincodeDeployTransaction(cds, uuid)
+ if err != nil {
+ return nil, nil, err
+ }
+ tx, err := deployer.NewChaincodeDeployTransaction(cds, uuid, attrs...)
+ return otx, tx, err
+}
+
+func createPublicExecuteTransaction(t *testing.T) (*obc.Transaction, *obc.Transaction, error) {
+ uuid := util.GenerateUUID()
+
+ cis := &obc.ChaincodeInvocationSpec{
+ ChaincodeSpec: &obc.ChaincodeSpec{
+ Type: obc.ChaincodeSpec_GOLANG,
+ ChaincodeID: &obc.ChaincodeID{Path: "Contract001"},
+ CtorMsg: nil,
+ ConfidentialityLevel: obc.ConfidentialityLevel_PUBLIC,
+ },
+ }
+
+ otx, err := obc.NewChaincodeExecute(cis, uuid, obc.Transaction_CHAINCODE_INVOKE)
+ if err != nil {
+ return nil, nil, err
+ }
+ tx, err := invoker.NewChaincodeExecute(cis, uuid, attrs...)
+ return otx, tx, err
+}
+
+func createPublicQueryTransaction(t *testing.T) (*obc.Transaction, *obc.Transaction, error) {
+ uuid := util.GenerateUUID()
+
+ cis := &obc.ChaincodeInvocationSpec{
+ ChaincodeSpec: &obc.ChaincodeSpec{
+ Type: obc.ChaincodeSpec_GOLANG,
+ ChaincodeID: &obc.ChaincodeID{Path: "Contract001"},
+ CtorMsg: nil,
+ ConfidentialityLevel: obc.ConfidentialityLevel_PUBLIC,
+ },
+ }
+
+ otx, err := obc.NewChaincodeExecute(cis, uuid, obc.Transaction_CHAINCODE_QUERY)
+ if err != nil {
+ return nil, nil, err
+ }
+ tx, err := invoker.NewChaincodeQuery(cis, uuid, attrs...)
+ return otx, tx, err
+}
+
+func isEqual(src, dst *obc.Transaction) error {
+ if !reflect.DeepEqual(src.Payload, dst.Payload) {
+ return fmt.Errorf("Different Payload [%s]!=[%s].", utils.EncodeBase64(src.Payload), utils.EncodeBase64(dst.Payload))
+ }
+
+ if !reflect.DeepEqual(src.ChaincodeID, dst.ChaincodeID) {
+ return fmt.Errorf("Different ChaincodeID [%s]!=[%s].", utils.EncodeBase64(src.ChaincodeID), utils.EncodeBase64(dst.ChaincodeID))
+ }
+
+ if !reflect.DeepEqual(src.Metadata, dst.Metadata) {
+ return fmt.Errorf("Different Metadata [%s]!=[%s].", utils.EncodeBase64(src.Metadata), utils.EncodeBase64(dst.Metadata))
+ }
+
+ return nil
+}
+
+func cleanup() {
+ fmt.Println("Cleanup...")
+ ok, errs := CloseAllClients()
+ if !ok {
+ for _, err := range errs {
+ log.Errorf("Failed closing clients [%s]", err)
+ }
+ }
+ ok, errs = CloseAllPeers()
+ if !ok {
+ for _, err := range errs {
+ log.Errorf("Failed closing clients [%s]", err)
+ }
+ }
+ ok, errs = CloseAllValidators()
+ if !ok {
+ for _, err := range errs {
+ log.Errorf("Failed closing clients [%s]", err)
+ }
+ }
+ stopPKI()
+ removeFolders()
+ fmt.Println("Cleanup...done!")
+}
+
+func stopPKI() {
+ aca.Stop()
+ eca.Stop()
+ tca.Stop()
+ tlsca.Stop()
+
+ server.Stop()
+}
+
+func removeFolders() {
+ if err := os.RemoveAll(filepath.Join(os.TempDir(), "obc-crypto-tests")); err != nil {
+ fmt.Printf("Failed removing [%s] [%s]\n", "obc-crypto-tests", err)
+ }
+
+}
diff --git a/core/crypto/crypto_test.yaml b/core/crypto/crypto_test.yaml
new file mode 100644
index 00000000000..9b1e25230e0
--- /dev/null
+++ b/core/crypto/crypto_test.yaml
@@ -0,0 +1,196 @@
+###############################################################################
+#
+# CAs section
+#
+###############################################################################
+server:
+ version: "0.1"
+ port: ":50541"
+
+
+security:
+ # Can be 256 or 384
+ # Must be the same as in core.yaml
+ level: 256
+
+ # Enable/Disable multithread
+ multithreading:
+ enabled: false
+
+ # TCerts related configuration
+ tcert:
+ batch:
+ # The size of the batch of TCerts
+ size: 200
+ level: 256
+ hashAlgorithm: SHA3
+
+eca:
+ affiliations:
+ banks_and_institutions:
+ banks:
+ - bank_a
+ - bank_b
+ - bank_c
+ institutions:
+ - institution_a
+
+ users:
+ # clients
+ userthread: 1 9gvZQRwhUq9q bank_a
+ user1: 1 9gvZQRwhUq9q bank_a
+ user2: 1 9gvZQRwhUq9q bank_a
+ TestRegistrationSameEnrollIDDifferentRole: 1 9gvZQRwhUq9q bank_a
+
+ # peers
+ peer: 2 9gvZQRwhUq9q bank_a
+ peerthread: 2 9gvZQRwhUq9q bank_a
+
+ # validators
+ validator: 4 9gvZQRwhUq9q bank_a
+ validatorthread: 4 9gvZQRwhUq9q bank_a
+
+tca:
+ attribute-encryption:
+ enabled: true
+
+aca:
+ attributes:
+ attribute-entry-0: user1;bank_a;company;ACompany;2015-01-01T00:00:00-03:00;;
+ attribute-entry-1: user1;bank_a;position;Software Staff;2015-01-01T00:00:00-03:00;2015-07-12T23:59:59-03:00;
+ attribute-entry-2: user1;bank_a;position;Software Engineer;2015-07-13T00:00:00-03:00;;
+ attribute-entry-3: user2;bank_a;company;ACompany;2001-02-02T00:00:00-03:00;;
+ attribute-entry-4: user2;bank_a;position;Project Manager;2001-02-02T00:00:00-03:00;;
+ address: localhost:50541
+ server-name: acap
+ enabled: true
+
+
+###############################################################################
+#
+# Peer section
+#
+###############################################################################
+
+
+peer:
+ pki:
+ eca:
+ paddr: localhost:50541
+
+ tca:
+ paddr: localhost:50541
+
+ tlsca:
+ paddr: localhost:50541
+
+ tls:
+ enabled: false
+ rootcert:
+
+ # The server name used to verify the hostname returned by TLS handshake
+ serverhostoverride:
+
+ validator:
+ enabled: false
+
+###############################################################################
+#
+# Security section - Applied to all entities (client, NVP, VP)
+#
+###############################################################################
+security:
+ # TCerts related configuration
+ tcert:
+ batch:
+ # The size of the batch of TCerts
+ size: 10
+ attributes:
+ company: ACompany
+ position: "Software Engineer"
+ abac:
+ enabled: true
+ level: 256
+ hashAlgorithm: SHA3
+
+###############################################################################
+#
+# Test parameters section
+#
+###############################################################################
+
+logging:
+
+ # Valid logging levels are case-insensitive strings chosen from
+
+ # CRITICAL | ERROR | WARNING | NOTICE | INFO | DEBUG
+
+ # Logging 'module' names are also strings, however valid module names are
+ # defined at runtime and are not checked for validity during option
+ # processing.
+
+ # Default logging levels are specified here for each of the obc-peer
+ # commands. For commands that have subcommands, the defaults also apply to
+ # all subcommands of the command. These logging levels can be overridden
+ # on the command line using the --logging-level command-line option, or by
+ # setting the CORE_LOGGING_LEVEL environment variable.
+
+ # The logging level specification is of the form
+
+ # [<module>[,<module>...]=]<level>[:[<module>[,<module>...]=]<level>...]
+
+ # A logging level by itself is taken as the overall default. Otherwise,
+ # overrides for individual or groups of modules can be specified using the
+ # <module>[,<module>...]=<level> syntax.
+
+ # Examples:
+ # info - Set default to INFO
+ # warning:main,db=debug:chaincode=info - Override default WARNING in main,db,chaincode
+ # chaincode=info:main=debug:db=debug:warning - Same as above
+ peer: info
+ crypto: info
+ status: warning
+ stop: warning
+ login: warning
+ vm: warning
+ chaincode: warning
+
+
+
+tests:
+
+ crypto:
+
+ users:
+
+ user1:
+ enrollid: user1
+ enrollpw: 9gvZQRwhUq9q
+
+ user2:
+ enrollid: user2
+ enrollpw: 9gvZQRwhUq9q
+
+ validator:
+ enrollid: validator
+ enrollpw: 9gvZQRwhUq9q
+
+ validatorthread:
+ enrollid: validatorthread
+ enrollpw: 9gvZQRwhUq9q
+
+ peer:
+ enrollid: peer
+ enrollpw: 9gvZQRwhUq9q
+
+ peerthread:
+ enrollid: peerthread
+ enrollpw: 9gvZQRwhUq9q
+
+ TestRegistrationSameEnrollIDDifferentRole:
+ enrollid: TestRegistrationSameEnrollIDDifferentRole
+ enrollpw: 9gvZQRwhUq9q
+
+ userthread:
+ enrollid: userthread
+ enrollpw: 9gvZQRwhUq9q
diff --git a/core/crypto/node.go b/core/crypto/node.go
new file mode 100644
index 00000000000..c397e01a95c
--- /dev/null
+++ b/core/crypto/node.go
@@ -0,0 +1,31 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+// Utility functions
+
+func eTypeToString(eType NodeType) string {
+ switch eType {
+ case NodeClient:
+ return "client"
+ case NodePeer:
+ return "peer"
+ case NodeValidator:
+ return "validator"
+ }
+ return "Invalid Type"
+}
diff --git a/core/crypto/node_conf.go b/core/crypto/node_conf.go
new file mode 100644
index 00000000000..ccb5dca5e95
--- /dev/null
+++ b/core/crypto/node_conf.go
@@ -0,0 +1,296 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "errors"
+ "path/filepath"
+
+ "github.com/spf13/viper"
+)
+
+func (node *nodeImpl) initConfiguration(name string) (err error) {
+ // Set logger
+ prefix := eTypeToString(node.eType)
+
+ // Set configuration
+ node.conf = &configuration{prefix: prefix, name: name}
+ if err = node.conf.init(); err != nil {
+ return
+ }
+
+ node.Debugf("Data will be stored at [%s]", node.conf.configurationPath)
+
+ return
+}
+
+type configuration struct {
+ prefix string
+ name string
+
+ logPrefix string
+
+ rootDataPath string
+ configurationPath string
+ keystorePath string
+ rawsPath string
+ tCertsPath string
+
+ configurationPathProperty string
+ ecaPAddressProperty string
+ tcaPAddressProperty string
+ tlscaPAddressProperty string
+
+ securityLevel int
+ hashAlgorithm string
+ confidentialityProtocolVersion string
+
+ tlsServerName string
+
+ multiThreading bool
+ tCertBatchSize int
+}
+
+func (conf *configuration) init() error {
+ conf.configurationPathProperty = "peer.fileSystemPath"
+ conf.ecaPAddressProperty = "peer.pki.eca.paddr"
+ conf.tcaPAddressProperty = "peer.pki.tca.paddr"
+ conf.tlscaPAddressProperty = "peer.pki.tlsca.paddr"
+ conf.logPrefix = "[" + conf.prefix + "." + conf.name + "] "
+
+ // Check mandatory fields
+ if err := conf.checkProperty(conf.configurationPathProperty); err != nil {
+ return err
+ }
+ if err := conf.checkProperty(conf.ecaPAddressProperty); err != nil {
+ return err
+ }
+ if err := conf.checkProperty(conf.tcaPAddressProperty); err != nil {
+ return err
+ }
+ if err := conf.checkProperty(conf.tlscaPAddressProperty); err != nil {
+ return err
+ }
+
+ conf.configurationPath = viper.GetString(conf.configurationPathProperty)
+ conf.rootDataPath = conf.configurationPath
+
+ // Set configuration path
+ conf.configurationPath = filepath.Join(
+ conf.configurationPath,
+ "crypto", conf.prefix, conf.name,
+ )
+
+ // Set ks path
+ conf.keystorePath = filepath.Join(conf.configurationPath, "ks")
+
+ // Set raws path
+ conf.rawsPath = filepath.Join(conf.keystorePath, "raw")
+
+ // Set tCerts path
+ conf.tCertsPath = filepath.Join(conf.keystorePath, "tcerts")
+
+ conf.securityLevel = 384
+ if viper.IsSet("security.level") {
+ ovveride := viper.GetInt("security.level")
+ if ovveride != 0 {
+ conf.securityLevel = ovveride
+ }
+ }
+
+ conf.hashAlgorithm = "SHA3"
+ if viper.IsSet("security.hashAlgorithm") {
+ ovveride := viper.GetString("security.hashAlgorithm")
+ if ovveride != "" {
+ conf.hashAlgorithm = ovveride
+ }
+ }
+
+ conf.confidentialityProtocolVersion = "1.2"
+ if viper.IsSet("security.confidentialityProtocolVersion") {
+ ovveride := viper.GetString("security.confidentialityProtocolVersion")
+ if ovveride != "" {
+ conf.confidentialityProtocolVersion = ovveride
+ }
+ }
+
+ // Set TLS host override
+ conf.tlsServerName = "tlsca"
+ if viper.IsSet("peer.pki.tls.serverhostoverride") {
+ ovveride := viper.GetString("peer.pki.tls.serverhostoverride")
+ if ovveride != "" {
+ conf.tlsServerName = ovveride
+ }
+ }
+
+ // Set tCertBatchSize
+ conf.tCertBatchSize = 200
+ if viper.IsSet("security.tcert.batch.size") {
+ ovveride := viper.GetInt("security.tcert.batch.size")
+ if ovveride != 0 {
+ conf.tCertBatchSize = ovveride
+ }
+ }
+
+ // Set multithread
+ conf.multiThreading = false
+ if viper.IsSet("security.multithreading.enabled") {
+ conf.multiThreading = viper.GetBool("security.multithreading.enabled")
+ }
+
+ return nil
+}
+
+func (conf *configuration) checkProperty(property string) error {
+ res := viper.GetString(property)
+ if res == "" {
+ return errors.New("Property not specified in configuration file. Please check that property is set: " + property)
+ }
+ return nil
+}
+
+func (conf *configuration) getTCAPAddr() string {
+ return viper.GetString(conf.tcaPAddressProperty)
+}
+
+func (conf *configuration) getECAPAddr() string {
+ return viper.GetString(conf.ecaPAddressProperty)
+}
+
+func (conf *configuration) getTLSCAPAddr() string {
+ return viper.GetString(conf.tlscaPAddressProperty)
+}
+
+func (conf *configuration) getConfPath() string {
+ return conf.configurationPath
+}
+
+func (conf *configuration) getTCertsPath() string {
+ return conf.tCertsPath
+}
+
+func (conf *configuration) getKeyStorePath() string {
+ return conf.keystorePath
+}
+
+func (conf *configuration) getRootDatastorePath() string {
+ return conf.rootDataPath
+}
+
+func (conf *configuration) getRawsPath() string {
+ return conf.rawsPath
+}
+
+func (conf *configuration) getKeyStoreFilename() string {
+ return "db"
+}
+
+func (conf *configuration) getKeyStoreFilePath() string {
+ return filepath.Join(conf.getKeyStorePath(), conf.getKeyStoreFilename())
+}
+
+func (conf *configuration) getPathForAlias(alias string) string {
+ return filepath.Join(conf.getRawsPath(), alias)
+}
+
+func (conf *configuration) getQueryStateKeyFilename() string {
+ return "query.key"
+}
+
+func (conf *configuration) getEnrollmentKeyFilename() string {
+ return "enrollment.key"
+}
+
+func (conf *configuration) getEnrollmentCertFilename() string {
+ return "enrollment.cert"
+}
+
+func (conf *configuration) getEnrollmentIDPath() string {
+ return filepath.Join(conf.getRawsPath(), conf.getEnrollmentIDFilename())
+}
+
+func (conf *configuration) getEnrollmentIDFilename() string {
+ return "enrollment.id"
+}
+
+func (conf *configuration) getTCACertsChainFilename() string {
+ return "tca.cert.chain"
+}
+
+func (conf *configuration) getECACertsChainFilename() string {
+ return "eca.cert.chain"
+}
+
+func (conf *configuration) getTLSCACertsChainFilename() string {
+ return "tlsca.cert.chain"
+}
+
+func (conf *configuration) getTLSCACertsExternalPath() string {
+ return viper.GetString("peer.pki.tls.rootcert.file")
+}
+
+func (conf *configuration) isTLSEnabled() bool {
+ return viper.GetBool("peer.pki.tls.enabled")
+}
+
+func (conf *configuration) isTLSClientAuthEnabled() bool {
+ return viper.GetBool("peer.pki.tls.client.auth.enabled")
+}
+
+func (conf *configuration) IsMultithreadingEnabled() bool {
+ return conf.multiThreading
+}
+
+func (conf *configuration) getTCAServerName() string {
+ return conf.tlsServerName
+}
+
+func (conf *configuration) getECAServerName() string {
+ return conf.tlsServerName
+}
+
+func (conf *configuration) getTLSCAServerName() string {
+ return conf.tlsServerName
+}
+
+func (conf *configuration) getTLSKeyFilename() string {
+ return "tls.key"
+}
+
+func (conf *configuration) getTLSCertFilename() string {
+ return "tls.cert"
+}
+
+func (conf *configuration) getTLSRootCertFilename() string {
+ return "tls.cert.chain"
+}
+
+func (conf *configuration) getEnrollmentChainKeyFilename() string {
+ return "chain.key"
+}
+
+func (conf *configuration) getTCertOwnerKDFKeyFilename() string {
+ return "tca.kdf.key"
+}
+
+func (conf *configuration) getTCertBatchSize() int {
+ return conf.tCertBatchSize
+}
+
+func (conf *configuration) GetConfidentialityProtocolVersion() string {
+ return conf.confidentialityProtocolVersion
+}
diff --git a/core/crypto/node_crypto.go b/core/crypto/node_crypto.go
new file mode 100644
index 00000000000..014c75ab257
--- /dev/null
+++ b/core/crypto/node_crypto.go
@@ -0,0 +1,121 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "crypto/x509"
+
+ ecies "github.com/hyperledger/fabric/core/crypto/primitives/ecies"
+)
+
+func (node *nodeImpl) registerCryptoEngine(enrollID, enrollPWD string) error {
+ node.Debug("Registering node crypto engine...")
+
+ // Init CLI
+ node.eciesSPI = ecies.NewSPI()
+
+ if err := node.initTLS(); err != nil {
+ node.Errorf("Failed initliazing TLS [%s].", err.Error())
+
+ return err
+ }
+
+ if err := node.retrieveECACertsChain(enrollID); err != nil {
+ node.Errorf("Failed retrieving ECA certs chain [%s].", err.Error())
+
+ return err
+ }
+
+ if err := node.retrieveTCACertsChain(enrollID); err != nil {
+ node.Errorf("Failed retrieving ECA certs chain [%s].", err.Error())
+
+ return err
+ }
+
+ if err := node.retrieveEnrollmentData(enrollID, enrollPWD); err != nil {
+ node.Errorf("Failed retrieving enrollment data [%s].", err.Error())
+
+ return err
+ }
+
+ if err := node.retrieveTLSCertificate(enrollID, enrollPWD); err != nil {
+ node.Errorf("Failed retrieving enrollment data: %s", err)
+
+ return err
+ }
+
+ node.Debug("Registering node crypto engine...done!")
+
+ return nil
+}
+
+func (node *nodeImpl) initCryptoEngine() error {
+ node.Debug("Initializing node crypto engine...")
+
+ // Init CLI
+ node.eciesSPI = ecies.NewSPI()
+
+ // Init certPools
+ node.rootsCertPool = x509.NewCertPool()
+ node.tlsCertPool = x509.NewCertPool()
+ node.ecaCertPool = x509.NewCertPool()
+ node.tcaCertPool = x509.NewCertPool()
+
+ // Load ECA certs chain
+ if err := node.loadECACertsChain(); err != nil {
+ return err
+ }
+
+ // Load TCA certs chain
+ if err := node.loadTCACertsChain(); err != nil {
+ return err
+ }
+
+ // Load enrollment secret key
+ if err := node.loadEnrollmentKey(); err != nil {
+ return err
+ }
+
+ // Load enrollment certificate and set validator ID
+ if err := node.loadEnrollmentCertificate(); err != nil {
+ return err
+ }
+
+ // Load enrollment id
+ if err := node.loadEnrollmentID(); err != nil {
+ return err
+ }
+
+ // Load enrollment chain key
+ if err := node.loadEnrollmentChainKey(); err != nil {
+ return err
+ }
+
+ // Load TLS certs chain certificate
+ if err := node.loadTLSCACertsChain(); err != nil {
+ return err
+ }
+
+ // Load tls certificate
+ if err := node.loadTLSCertificate(); err != nil {
+ return err
+ }
+
+ node.Debug("Initializing node crypto engine...done!")
+
+ return nil
+}
diff --git a/core/crypto/node_eca.go b/core/crypto/node_eca.go
new file mode 100644
index 00000000000..4b16b07cbd6
--- /dev/null
+++ b/core/crypto/node_eca.go
@@ -0,0 +1,485 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/x509"
+ "google/protobuf"
+ "time"
+
+ membersrvc "github.com/hyperledger/fabric/membersrvc/protos"
+
+ "encoding/asn1"
+ "errors"
+ "io/ioutil"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/primitives/ecies"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+)
+
+var (
+ // ECertSubjectRole is the ASN1 object identifier of the subject's role.
+ ECertSubjectRole = asn1.ObjectIdentifier{2, 1, 3, 4, 5, 6, 7}
+)
+
+func (node *nodeImpl) retrieveECACertsChain(userID string) error {
+ if !node.ks.certMissing(node.conf.getECACertsChainFilename()) {
+ return nil
+ }
+
+ // Retrieve ECA certificate and verify it
+ ecaCertRaw, err := node.getECACertificate()
+ if err != nil {
+ node.Errorf("Failed getting ECA certificate [%s].", err.Error())
+
+ return err
+ }
+ node.Debugf("ECA certificate [% x].", ecaCertRaw)
+
+ // TODO: Test ECA cert against root CA
+ // TODO: check response.Cert against rootCA
+ x509ECACert, err := primitives.DERToX509Certificate(ecaCertRaw)
+ if err != nil {
+ node.Errorf("Failed parsing ECA certificate [%s].", err.Error())
+
+ return err
+ }
+
+ // Prepare ecaCertPool
+ node.ecaCertPool = x509.NewCertPool()
+ node.ecaCertPool.AddCert(x509ECACert)
+
+ // Store ECA cert
+ node.Debugf("Storing ECA certificate for [%s]...", userID)
+
+ if err := node.ks.storeCert(node.conf.getECACertsChainFilename(), ecaCertRaw); err != nil {
+ node.Errorf("Failed storing eca certificate [%s].", err.Error())
+ return err
+ }
+
+ return nil
+}
+
+func (node *nodeImpl) retrieveEnrollmentData(enrollID, enrollPWD string) error {
+ if !node.ks.certMissing(node.conf.getEnrollmentCertFilename()) {
+ return nil
+ }
+
+ key, enrollCertRaw, enrollChainKey, err := node.getEnrollmentCertificateFromECA(enrollID, enrollPWD)
+ if err != nil {
+ node.Errorf("Failed getting enrollment certificate [id=%s]: [%s]", enrollID, err)
+
+ return err
+ }
+ node.Debugf("Enrollment certificate [% x].", enrollCertRaw)
+
+ node.Debugf("Storing enrollment data for user [%s]...", enrollID)
+
+ // Store enrollment id
+ err = ioutil.WriteFile(node.conf.getEnrollmentIDPath(), []byte(enrollID), 0700)
+ if err != nil {
+ node.Errorf("Failed storing enrollment certificate [id=%s]: [%s]", enrollID, err)
+ return err
+ }
+
+ // Store enrollment key
+ if err := node.ks.storePrivateKey(node.conf.getEnrollmentKeyFilename(), key); err != nil {
+ node.Errorf("Failed storing enrollment key [id=%s]: [%s]", enrollID, err)
+ return err
+ }
+
+ // Store enrollment cert
+ if err := node.ks.storeCert(node.conf.getEnrollmentCertFilename(), enrollCertRaw); err != nil {
+ node.Errorf("Failed storing enrollment certificate [id=%s]: [%s]", enrollID, err)
+ return err
+ }
+
+ // Code for confidentiality 1.2
+ // Store enrollment chain key
+ if node.eType == NodeValidator {
+ node.Debugf("Enrollment chain key for validator [%s]...", enrollID)
+ // enrollChainKey is a secret key
+
+ node.Debugf("key [%s]...", string(enrollChainKey))
+
+ key, err := primitives.PEMtoPrivateKey(enrollChainKey, nil)
+ if err != nil {
+ node.Errorf("Failed unmarshalling enrollment chain key [id=%s]: [%s]", enrollID, err)
+ return err
+ }
+
+ if err := node.ks.storePrivateKey(node.conf.getEnrollmentChainKeyFilename(), key); err != nil {
+ node.Errorf("Failed storing enrollment chain key [id=%s]: [%s]", enrollID, err)
+ return err
+ }
+ } else {
+ node.Debugf("Enrollment chain key for non-validator [%s]...", enrollID)
+ // enrollChainKey is a public key
+
+ key, err := primitives.PEMtoPublicKey(enrollChainKey, nil)
+ if err != nil {
+ node.Errorf("Failed unmarshalling enrollment chain key [id=%s]: [%s]", enrollID, err)
+ return err
+ }
+ node.Debugf("Key decoded from PEM [%s]...", enrollID)
+
+ if err := node.ks.storePublicKey(node.conf.getEnrollmentChainKeyFilename(), key); err != nil {
+ node.Errorf("Failed storing enrollment chain key [id=%s]: [%s]", enrollID, err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (node *nodeImpl) loadEnrollmentKey() error {
+ node.Debug("Loading enrollment key...")
+
+ enrollPrivKey, err := node.ks.loadPrivateKey(node.conf.getEnrollmentKeyFilename())
+ if err != nil {
+ node.Errorf("Failed loading enrollment private key [%s].", err.Error())
+
+ return err
+ }
+
+ node.enrollPrivKey = enrollPrivKey.(*ecdsa.PrivateKey)
+
+ return nil
+}
+
+func (node *nodeImpl) loadEnrollmentCertificate() error {
+ node.Debug("Loading enrollment certificate...")
+
+ cert, der, err := node.ks.loadCertX509AndDer(node.conf.getEnrollmentCertFilename())
+ if err != nil {
+ node.Errorf("Failed parsing enrollment certificate [%s].", err.Error())
+
+ return err
+ }
+ node.enrollCert = cert
+
+ // TODO: move this to retrieve
+ pk := node.enrollCert.PublicKey.(*ecdsa.PublicKey)
+ err = primitives.VerifySignCapability(node.enrollPrivKey, pk)
+ if err != nil {
+ node.Errorf("Failed checking enrollment certificate against enrollment key [%s].", err.Error())
+
+ return err
+ }
+
+ // Set node ID
+ node.id = primitives.Hash(der)
+ node.Debugf("Setting id to [% x].", node.id)
+
+ // Set eCertHash
+ node.enrollCertHash = primitives.Hash(der)
+ node.Debugf("Setting enrollCertHash to [% x].", node.enrollCertHash)
+
+ return nil
+}
+
+func (node *nodeImpl) loadEnrollmentID() error {
+ node.Debugf("Loading enrollment id at [%s]...", node.conf.getEnrollmentIDPath())
+
+ enrollID, err := ioutil.ReadFile(node.conf.getEnrollmentIDPath())
+ if err != nil {
+ node.Errorf("Failed loading enrollment id [%s].", err.Error())
+
+ return err
+ }
+
+ // Set enrollment ID
+ node.enrollID = string(enrollID)
+ node.Debugf("Setting enrollment id to [%s].", node.enrollID)
+
+ return nil
+}
+
+func (node *nodeImpl) loadEnrollmentChainKey() error {
+ node.Debug("Loading enrollment chain key...")
+
+ // Code for confidentiality 1.2
+ if node.eType == NodeValidator {
+ // enrollChainKey is a secret key
+ enrollChainKey, err := node.ks.loadPrivateKey(node.conf.getEnrollmentChainKeyFilename())
+ if err != nil {
+ node.Errorf("Failed loading enrollment chain key: [%s]", err)
+ return err
+ }
+ node.enrollChainKey = enrollChainKey
+ } else {
+ // enrollChainKey is a public key
+ enrollChainKey, err := node.ks.loadPublicKey(node.conf.getEnrollmentChainKeyFilename())
+ if err != nil {
+ node.Errorf("Failed load enrollment chain key: [%s]", err)
+ return err
+ }
+ node.enrollChainKey = enrollChainKey
+ }
+
+ return nil
+}
+
+func (node *nodeImpl) loadECACertsChain() error {
+ node.Debug("Loading ECA certificates chain...")
+
+ pem, err := node.ks.loadCert(node.conf.getECACertsChainFilename())
+ if err != nil {
+ node.Errorf("Failed loading ECA certificates chain [%s].", err.Error())
+
+ return err
+ }
+
+ ok := node.ecaCertPool.AppendCertsFromPEM(pem)
+ if !ok {
+ node.Error("Failed appending ECA certificates chain.")
+
+ return errors.New("Failed appending ECA certificates chain.")
+ }
+
+ return nil
+}
+
+func (node *nodeImpl) getECAClient() (*grpc.ClientConn, membersrvc.ECAPClient, error) {
+ node.Debug("Getting ECA client...")
+
+ conn, err := node.getClientConn(node.conf.getECAPAddr(), node.conf.getECAServerName())
+ if err != nil {
+ node.Errorf("Failed getting client connection: [%s]", err)
+ }
+
+ client := membersrvc.NewECAPClient(conn)
+
+ node.Debug("Getting ECA client...done")
+
+ return conn, client, nil
+}
+
+func (node *nodeImpl) callECAReadCACertificate(ctx context.Context, opts ...grpc.CallOption) (*membersrvc.Cert, error) {
+ // Get an ECA Client
+ sock, ecaP, err := node.getECAClient()
+ defer sock.Close()
+
+ // Issue the request
+ cert, err := ecaP.ReadCACertificate(ctx, &membersrvc.Empty{}, opts...)
+ if err != nil {
+ node.Errorf("Failed requesting read certificate [%s].", err.Error())
+
+ return nil, err
+ }
+
+ return cert, nil
+}
+
+func (node *nodeImpl) callECAReadCertificate(ctx context.Context, in *membersrvc.ECertReadReq, opts ...grpc.CallOption) (*membersrvc.CertPair, error) {
+ // Get an ECA Client
+ sock, ecaP, err := node.getECAClient()
+ defer sock.Close()
+
+ // Issue the request
+ resp, err := ecaP.ReadCertificatePair(ctx, in, opts...)
+ if err != nil {
+ node.Errorf("Failed requesting read certificate [%s].", err.Error())
+
+ return nil, err
+ }
+
+ return resp, nil
+}
+
+func (node *nodeImpl) callECAReadCertificateByHash(ctx context.Context, in *membersrvc.Hash, opts ...grpc.CallOption) (*membersrvc.CertPair, error) {
+ // Get an ECA Client
+ sock, ecaP, err := node.getECAClient()
+ defer sock.Close()
+
+ // Issue the request
+ resp, err := ecaP.ReadCertificateByHash(ctx, in, opts...)
+ if err != nil {
+ node.Errorf("Failed requesting read certificate [%s].", err.Error())
+
+ return nil, err
+ }
+
+ return &membersrvc.CertPair{Sign: resp.Cert, Enc: nil}, nil
+}
+
+func (node *nodeImpl) getEnrollmentCertificateFromECA(id, pw string) (interface{}, []byte, []byte, error) {
+ // Get a new ECA Client
+ sock, ecaP, err := node.getECAClient()
+ defer sock.Close()
+
+ // Run the protocol
+
+ signPriv, err := primitives.NewECDSAKey()
+ if err != nil {
+ node.Errorf("Failed generating ECDSA key [%s].", err.Error())
+
+ return nil, nil, nil, err
+ }
+ signPub, err := x509.MarshalPKIXPublicKey(&signPriv.PublicKey)
+ if err != nil {
+ node.Errorf("Failed mashalling ECDSA key [%s].", err.Error())
+
+ return nil, nil, nil, err
+ }
+
+ encPriv, err := primitives.NewECDSAKey()
+ if err != nil {
+ node.Errorf("Failed generating Encryption key [%s].", err.Error())
+
+ return nil, nil, nil, err
+ }
+ encPub, err := x509.MarshalPKIXPublicKey(&encPriv.PublicKey)
+ if err != nil {
+ node.Errorf("Failed marshalling Encryption key [%s].", err.Error())
+
+ return nil, nil, nil, err
+ }
+
+ req := &membersrvc.ECertCreateReq{
+ Ts: &google_protobuf.Timestamp{Seconds: time.Now().Unix(), Nanos: 0},
+ Id: &membersrvc.Identity{Id: id},
+ Tok: &membersrvc.Token{Tok: []byte(pw)},
+ Sign: &membersrvc.PublicKey{Type: membersrvc.CryptoType_ECDSA, Key: signPub},
+ Enc: &membersrvc.PublicKey{Type: membersrvc.CryptoType_ECDSA, Key: encPub},
+ Sig: nil}
+
+ resp, err := ecaP.CreateCertificatePair(context.Background(), req)
+ if err != nil {
+ node.Errorf("Failed invoking CreateCertficatePair [%s].", err.Error())
+
+ return nil, nil, nil, err
+ }
+
+ if resp.FetchResult != nil && resp.FetchResult.Status != membersrvc.FetchAttrsResult_SUCCESS {
+ node.Warning(resp.FetchResult.Msg)
+ }
+ //out, err := rsa.DecryptPKCS1v15(rand.Reader, encPriv, resp.Tok.Tok)
+ spi := ecies.NewSPI()
+ eciesKey, err := spi.NewPrivateKey(nil, encPriv)
+ if err != nil {
+ node.Errorf("Failed parsing decrypting key [%s].", err.Error())
+
+ return nil, nil, nil, err
+ }
+
+ ecies, err := spi.NewAsymmetricCipherFromPublicKey(eciesKey)
+ if err != nil {
+ node.Errorf("Failed creating asymmetrinc cipher [%s].", err.Error())
+
+ return nil, nil, nil, err
+ }
+
+ out, err := ecies.Process(resp.Tok.Tok)
+ if err != nil {
+ node.Errorf("Failed decrypting toke [%s].", err.Error())
+
+ return nil, nil, nil, err
+ }
+
+ req.Tok.Tok = out
+ req.Sig = nil
+
+ hash := primitives.NewHash()
+ raw, _ := proto.Marshal(req)
+ hash.Write(raw)
+
+ r, s, err := ecdsa.Sign(rand.Reader, signPriv, hash.Sum(nil))
+ if err != nil {
+ node.Errorf("Failed signing [%s].", err.Error())
+
+ return nil, nil, nil, err
+ }
+ R, _ := r.MarshalText()
+ S, _ := s.MarshalText()
+ req.Sig = &membersrvc.Signature{Type: membersrvc.CryptoType_ECDSA, R: R, S: S}
+
+ resp, err = ecaP.CreateCertificatePair(context.Background(), req)
+ if err != nil {
+ node.Errorf("Failed invoking CreateCertificatePair [%s].", err.Error())
+
+ return nil, nil, nil, err
+ }
+
+ // Verify response
+
+ // Verify cert for signing
+ node.Debugf("Enrollment certificate for signing [% x]", primitives.Hash(resp.Certs.Sign))
+
+ x509SignCert, err := primitives.DERToX509Certificate(resp.Certs.Sign)
+ if err != nil {
+ node.Errorf("Failed parsing signing enrollment certificate for signing: [%s]", err)
+
+ return nil, nil, nil, err
+ }
+
+ _, err = primitives.GetCriticalExtension(x509SignCert, ECertSubjectRole)
+ if err != nil {
+ node.Errorf("Failed parsing ECertSubjectRole in enrollment certificate for signing: [%s]", err)
+
+ return nil, nil, nil, err
+ }
+
+ err = primitives.CheckCertAgainstSKAndRoot(x509SignCert, signPriv, node.ecaCertPool)
+ if err != nil {
+ node.Errorf("Failed checking signing enrollment certificate for signing: [%s]", err)
+
+ return nil, nil, nil, err
+ }
+
+ // Verify cert for encrypting
+ node.Debugf("Enrollment certificate for encrypting [% x]", primitives.Hash(resp.Certs.Enc))
+
+ x509EncCert, err := primitives.DERToX509Certificate(resp.Certs.Enc)
+ if err != nil {
+ node.Errorf("Failed parsing signing enrollment certificate for encrypting: [%s]", err)
+
+ return nil, nil, nil, err
+ }
+
+ _, err = primitives.GetCriticalExtension(x509EncCert, ECertSubjectRole)
+ if err != nil {
+ node.Errorf("Failed parsing ECertSubjectRole in enrollment certificate for encrypting: [%s]", err)
+
+ return nil, nil, nil, err
+ }
+
+ err = primitives.CheckCertAgainstSKAndRoot(x509EncCert, encPriv, node.ecaCertPool)
+ if err != nil {
+ node.Errorf("Failed checking signing enrollment certificate for encrypting: [%s]", err)
+
+ return nil, nil, nil, err
+ }
+
+ return signPriv, resp.Certs.Sign, resp.Pkchain, nil
+}
+
+func (node *nodeImpl) getECACertificate() ([]byte, error) {
+ responce, err := node.callECAReadCACertificate(context.Background())
+ if err != nil {
+ node.Errorf("Failed requesting ECA certificate [%s].", err.Error())
+
+ return nil, err
+ }
+
+ return responce.Cert, nil
+}
diff --git a/core/crypto/node_grpc.go b/core/crypto/node_grpc.go
new file mode 100644
index 00000000000..9d1fcb6b0f1
--- /dev/null
+++ b/core/crypto/node_grpc.go
@@ -0,0 +1,75 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "github.com/hyperledger/fabric/core/comm"
+)
+
+func (node *nodeImpl) initTLS() error {
+ node.Debug("Initiliazing TLS...")
+
+ if node.conf.isTLSEnabled() {
+ pem, err := node.ks.loadExternalCert(node.conf.getTLSCACertsExternalPath())
+ if err != nil {
+ node.Errorf("Failed loading TLSCA certificates chain [%s].", err.Error())
+
+ return err
+ }
+
+ node.tlsCertPool = x509.NewCertPool()
+ ok := node.tlsCertPool.AppendCertsFromPEM(pem)
+ if !ok {
+ node.Error("Failed appending TLSCA certificates chain.")
+
+ return errors.New("Failed appending TLSCA certificates chain.")
+ }
+ node.Debug("Initiliazing TLS...Done")
+ } else {
+ node.Debug("Initiliazing TLS...Disabled!!!")
+ }
+
+ return nil
+}
+
+func (node *nodeImpl) getClientConn(address string, serverName string) (*grpc.ClientConn, error) {
+ node.Debugf("Dial to addr:[%s], with serverName:[%s]...", address, serverName)
+
+ if node.conf.isTLSEnabled() {
+ node.Debug("TLS enabled...")
+
+ config := tls.Config{
+ InsecureSkipVerify: false,
+ RootCAs: node.tlsCertPool,
+ ServerName: serverName,
+ }
+ if node.conf.isTLSClientAuthEnabled() {
+
+ }
+
+ return comm.NewClientConnectionWithAddress(address, false, true, credentials.NewTLS(&config))
+ }
+ node.Debug("TLS disabled...")
+ return comm.NewClientConnectionWithAddress(address, false, false, nil)
+}
diff --git a/core/crypto/node_impl.go b/core/crypto/node_impl.go
new file mode 100644
index 00000000000..5fff6479cef
--- /dev/null
+++ b/core/crypto/node_impl.go
@@ -0,0 +1,213 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "crypto/ecdsa"
+ "crypto/x509"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+)
+
+// Public Struct
+
// nodeImpl is the common state shared by every crypto-enabled entity.
// It bundles configuration, the keystore, the trusted certificate pools
// and the enrollment material loaded or retrieved at registration time.
type nodeImpl struct {
	isRegistered  bool // set by setRegistered() once register() completes
	isInitialized bool // set by setInitialized() once init() completes

	// Node type
	eType NodeType

	// Configuration
	conf *configuration

	// keyStore
	ks *keyStore

	// Certs Pool
	rootsCertPool *x509.CertPool
	tlsCertPool   *x509.CertPool
	ecaCertPool   *x509.CertPool
	tcaCertPool   *x509.CertPool

	// 48-bytes identifier
	id []byte

	// Enrollment Certificate and private key
	enrollID       string
	enrollCert     *x509.Certificate
	enrollPrivKey  *ecdsa.PrivateKey
	enrollCertHash []byte

	// Enrollment Chain key; loaded either as a private or a public key
	// depending on the node's role (see the chain-key loading code).
	enrollChainKey interface{}

	// TLS
	tlsCert *x509.Certificate

	// Crypto SPI
	eciesSPI primitives.AsymmetricCipherSPI
}
+
// registerFunc is a hook invoked by nodeImpl.register after the base node
// registration succeeds, letting derived entities add their own steps.
type registerFunc func(eType NodeType, name string, pwd []byte, enrollID, enrollPWD string) error

// initalizationFunc (sic — name kept for compatibility) is the analogous
// hook invoked by nodeImpl.init after base initialization succeeds.
type initalizationFunc func(eType NodeType, name string, pwd []byte) error
+
// GetType returns this node's entity type.
func (node *nodeImpl) GetType() NodeType {
	return node.eType
}

// GetName returns this node's configured name.
func (node *nodeImpl) GetName() string {
	return node.conf.name
}

// IsInitialized reports whether init() has completed successfully.
func (node *nodeImpl) IsInitialized() bool {
	return node.isInitialized
}

// setInitialized marks the node as initialized.
func (node *nodeImpl) setInitialized() {
	node.isInitialized = true
}

// IsRegistered reports whether register() has completed successfully.
func (node *nodeImpl) IsRegistered() bool {
	return node.isRegistered
}

// setRegistered marks the node as registered.
func (node *nodeImpl) setRegistered() {
	node.isRegistered = true
}
+
+func (node *nodeImpl) register(eType NodeType, name string, pwd []byte, enrollID, enrollPWD string, regFunc registerFunc) error {
+ // Set entity type
+ node.eType = eType
+
+ // Init Conf
+ if err := node.initConfiguration(name); err != nil {
+ node.Errorf("Failed initiliazing configuration [%s]: [%s].", enrollID, err)
+ return err
+ }
+
+ // Initialize keystore
+ err := node.initKeyStore(pwd)
+ if err != nil {
+ if err == utils.ErrKeyStoreAlreadyInitialized {
+ node.Error("Keystore already initialized.")
+ } else {
+ node.Errorf("Failed initiliazing keystore [%s].", err.Error())
+ }
+ return err
+ }
+
+ if node.IsRegistered() {
+ return utils.ErrAlreadyRegistered
+ }
+ if node.IsInitialized() {
+ return utils.ErrAlreadyInitialized
+ }
+
+ err = node.nodeRegister(eType, name, pwd, enrollID, enrollPWD)
+ if err != nil {
+ return err
+ }
+
+ if regFunc != nil {
+ err = regFunc(eType, name, pwd, enrollID, enrollPWD)
+ if err != nil {
+ return err
+ }
+ }
+
+ node.setRegistered()
+ node.Debugf("Registration of node [%s] with name [%s] completed", eType, name)
+
+ return nil
+}
+
+func (node *nodeImpl) nodeRegister(eType NodeType, name string, pwd []byte, enrollID, enrollPWD string) error {
+ // Register crypto engine
+ err := node.registerCryptoEngine(enrollID, enrollPWD)
+ if err != nil {
+ node.Errorf("Failed registering node crypto engine [%s].", err.Error())
+ return err
+ }
+
+ return nil
+}
+
+func (node *nodeImpl) init(eType NodeType, name string, pwd []byte, initFunc initalizationFunc) error {
+ // Set entity type
+ node.eType = eType
+
+ // Init Conf
+ if err := node.initConfiguration(name); err != nil {
+ node.Errorf("Failed initiliazing configuration: [%s]", err)
+ return err
+ }
+
+ // Initialize keystore
+ err := node.initKeyStore(pwd)
+ if err != nil {
+ if err == utils.ErrKeyStoreAlreadyInitialized {
+ node.Error("Keystore already initialized.")
+ } else {
+ node.Errorf("Failed initiliazing keystore [%s].", err.Error())
+ }
+ return err
+ }
+
+ if node.IsInitialized() {
+ return utils.ErrAlreadyInitialized
+ }
+
+ err = node.nodeInit(eType, name, pwd)
+ if err != nil {
+ return err
+ }
+
+ if initFunc != nil {
+ err = initFunc(eType, name, pwd)
+ if err != nil {
+ return err
+ }
+ }
+
+ node.setInitialized()
+
+ return nil
+}
+
+func (node *nodeImpl) nodeInit(eType NodeType, name string, pwd []byte) error {
+ // Init crypto engine
+ err := node.initCryptoEngine()
+ if err != nil {
+ node.Errorf("Failed initiliazing crypto engine [%s]. %s", err.Error(), utils.ErrRegistrationRequired.Error())
+ return err
+ }
+
+ return nil
+}
+
+func (node *nodeImpl) close() error {
+ // Close keystore
+ var err error
+
+ if node.ks != nil {
+ err = node.ks.close()
+ }
+
+ return err
+}
diff --git a/core/crypto/node_ks.go b/core/crypto/node_ks.go
new file mode 100644
index 00000000000..1a12d7cb32e
--- /dev/null
+++ b/core/crypto/node_ks.go
@@ -0,0 +1,415 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "crypto/x509"
+ "database/sql"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/hyperledger/fabric/core/crypto/utils"
+
+ // Required to successfully initialized the driver
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ _ "github.com/mattn/go-sqlite3"
+)
+
+/*
+var (
+ defaultCerts = make(map[string][]byte)
+)
+
+func addDefaultCert(key string, cert []byte) error {
+ log.Debugf("Adding Default Cert [%s][%s]", key, utils.EncodeBase64(cert))
+
+ der, err := utils.PEMtoDER(cert)
+ if err != nil {
+ log.Errorf("Failed adding default cert: [%s]", err)
+
+ return err
+ }
+
+ defaultCerts[key] = der
+
+ return nil
+}
+*/
+
+func (node *nodeImpl) initKeyStore(pwd []byte) error {
+ ks := keyStore{}
+ if err := ks.init(node, pwd); err != nil {
+ return err
+ }
+ node.ks = &ks
+
+ /*
+ // Add default certs
+ for key, value := range defaultCerts {
+ node.debug("Adding Default Cert to the keystore [%s][%s]", key, utils.EncodeBase64(value))
+ ks.storeCert(key, value)
+ }
+ */
+
+ return nil
+}
+
// keyStore manages the node's persisted cryptographic material: PEM files
// stored under per-alias paths plus a SQLite database backend.
type keyStore struct {
	node *nodeImpl // owner; provides configuration paths and logging

	isOpen bool // true once openKeyStore() has succeeded

	pwd []byte // password protecting encrypted PEM material (cloned at init)

	// backend
	sqlDB *sql.DB

	// Sync — guards init() against concurrent callers
	m sync.Mutex
}
+
+func (ks *keyStore) init(node *nodeImpl, pwd []byte) error {
+ ks.m.Lock()
+ defer ks.m.Unlock()
+
+ if ks.isOpen {
+ return utils.ErrKeyStoreAlreadyInitialized
+ }
+
+ ks.node = node
+ ks.pwd = utils.Clone(pwd)
+
+ err := ks.createKeyStoreIfNotExists()
+ if err != nil {
+ return err
+ }
+
+ err = ks.openKeyStore()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (ks *keyStore) isAliasSet(alias string) bool {
+ missing, _ := utils.FilePathMissing(ks.node.conf.getPathForAlias(alias))
+ if missing {
+ return false
+ }
+
+ return true
+}
+
+func (ks *keyStore) storePrivateKey(alias string, privateKey interface{}) error {
+ rawKey, err := primitives.PrivateKeyToPEM(privateKey, ks.pwd)
+ if err != nil {
+ ks.node.Errorf("Failed converting private key to PEM [%s]: [%s]", alias, err)
+ return err
+ }
+
+ err = ioutil.WriteFile(ks.node.conf.getPathForAlias(alias), rawKey, 0700)
+ if err != nil {
+ ks.node.Errorf("Failed storing private key [%s]: [%s]", alias, err)
+ return err
+ }
+
+ return nil
+}
+
+func (ks *keyStore) storePrivateKeyInClear(alias string, privateKey interface{}) error {
+ rawKey, err := primitives.PrivateKeyToPEM(privateKey, nil)
+ if err != nil {
+ ks.node.Errorf("Failed converting private key to PEM [%s]: [%s]", alias, err)
+ return err
+ }
+
+ err = ioutil.WriteFile(ks.node.conf.getPathForAlias(alias), rawKey, 0700)
+ if err != nil {
+ ks.node.Errorf("Failed storing private key [%s]: [%s]", alias, err)
+ return err
+ }
+
+ return nil
+}
+
// deletePrivateKeyInClear removes the clear-text private key file stored
// under alias (see storePrivateKeyInClear).
func (ks *keyStore) deletePrivateKeyInClear(alias string) error {
	return os.Remove(ks.node.conf.getPathForAlias(alias))
}
+
+func (ks *keyStore) loadPrivateKey(alias string) (interface{}, error) {
+ path := ks.node.conf.getPathForAlias(alias)
+ ks.node.Debugf("Loading private key [%s] at [%s]...", alias, path)
+
+ raw, err := ioutil.ReadFile(path)
+ if err != nil {
+ ks.node.Errorf("Failed loading private key [%s]: [%s].", alias, err.Error())
+
+ return nil, err
+ }
+
+ privateKey, err := primitives.PEMtoPrivateKey(raw, ks.pwd)
+ if err != nil {
+ ks.node.Errorf("Failed parsing private key [%s]: [%s].", alias, err.Error())
+
+ return nil, err
+ }
+
+ return privateKey, nil
+}
+
+func (ks *keyStore) storePublicKey(alias string, publicKey interface{}) error {
+ rawKey, err := primitives.PublicKeyToPEM(publicKey, ks.pwd)
+ if err != nil {
+ ks.node.Errorf("Failed converting public key to PEM [%s]: [%s]", alias, err)
+ return err
+ }
+
+ err = ioutil.WriteFile(ks.node.conf.getPathForAlias(alias), rawKey, 0700)
+ if err != nil {
+ ks.node.Errorf("Failed storing private key [%s]: [%s]", alias, err)
+ return err
+ }
+
+ return nil
+}
+
+func (ks *keyStore) loadPublicKey(alias string) (interface{}, error) {
+ path := ks.node.conf.getPathForAlias(alias)
+ ks.node.Debugf("Loading public key [%s] at [%s]...", alias, path)
+
+ raw, err := ioutil.ReadFile(path)
+ if err != nil {
+ ks.node.Errorf("Failed loading public key [%s]: [%s].", alias, err.Error())
+
+ return nil, err
+ }
+
+ privateKey, err := primitives.PEMtoPublicKey(raw, ks.pwd)
+ if err != nil {
+ ks.node.Errorf("Failed parsing private key [%s]: [%s].", alias, err.Error())
+
+ return nil, err
+ }
+
+ return privateKey, nil
+}
+
+func (ks *keyStore) storeKey(alias string, key []byte) error {
+ pem, err := primitives.AEStoEncryptedPEM(key, ks.pwd)
+ if err != nil {
+ ks.node.Errorf("Failed converting key to PEM [%s]: [%s]", alias, err)
+ return err
+ }
+
+ err = ioutil.WriteFile(ks.node.conf.getPathForAlias(alias), pem, 0700)
+ if err != nil {
+ ks.node.Errorf("Failed storing key [%s]: [%s]", alias, err)
+ return err
+ }
+
+ return nil
+}
+
+func (ks *keyStore) loadKey(alias string) ([]byte, error) {
+ path := ks.node.conf.getPathForAlias(alias)
+ ks.node.Debugf("Loading key [%s] at [%s]...", alias, path)
+
+ pem, err := ioutil.ReadFile(path)
+ if err != nil {
+ ks.node.Errorf("Failed loading key [%s]: [%s].", alias, err.Error())
+
+ return nil, err
+ }
+
+ key, err := primitives.PEMtoAES(pem, ks.pwd)
+ if err != nil {
+ ks.node.Errorf("Failed parsing key [%s]: [%s]", alias, err)
+
+ return nil, err
+ }
+
+ return key, nil
+}
+
+func (ks *keyStore) storeCert(alias string, der []byte) error {
+ err := ioutil.WriteFile(ks.node.conf.getPathForAlias(alias), primitives.DERCertToPEM(der), 0700)
+ if err != nil {
+ ks.node.Errorf("Failed storing certificate [%s]: [%s]", alias, err)
+ return err
+ }
+
+ return nil
+}
+
// certMissing reports whether no certificate file exists under alias.
func (ks *keyStore) certMissing(alias string) bool {
	return !ks.isAliasSet(alias)
}

// deleteCert removes the certificate file stored under alias.
func (ks *keyStore) deleteCert(alias string) error {
	return os.Remove(ks.node.conf.getPathForAlias(alias))
}
+
+func (ks *keyStore) loadCert(alias string) ([]byte, error) {
+ path := ks.node.conf.getPathForAlias(alias)
+ ks.node.Debugf("Loading certificate [%s] at [%s]...", alias, path)
+
+ pem, err := ioutil.ReadFile(path)
+ if err != nil {
+ ks.node.Errorf("Failed loading certificate [%s]: [%s].", alias, err.Error())
+
+ return nil, err
+ }
+
+ return pem, nil
+}
+
+func (ks *keyStore) loadExternalCert(path string) ([]byte, error) {
+ ks.node.Debugf("Loading external certificate at [%s]...", path)
+
+ pem, err := ioutil.ReadFile(path)
+ if err != nil {
+ ks.node.Errorf("Failed loading external certificate: [%s].", err.Error())
+
+ return nil, err
+ }
+
+ return pem, nil
+}
+
+func (ks *keyStore) loadCertX509AndDer(alias string) (*x509.Certificate, []byte, error) {
+ path := ks.node.conf.getPathForAlias(alias)
+ ks.node.Debugf("Loading certificate [%s] at [%s]...", alias, path)
+
+ pem, err := ioutil.ReadFile(path)
+ if err != nil {
+ ks.node.Errorf("Failed loading certificate [%s]: [%s].", alias, err.Error())
+
+ return nil, nil, err
+ }
+
+ cert, der, err := primitives.PEMtoCertificateAndDER(pem)
+ if err != nil {
+ ks.node.Errorf("Failed parsing certificate [%s]: [%s].", alias, err.Error())
+
+ return nil, nil, err
+ }
+
+ return cert, der, nil
+}
+
// close shuts down the backing SQLite database and marks the keystore
// closed. The closed state is recorded even if Close reports an error.
func (ks *keyStore) close() error {
	ks.node.Debug("Closing keystore...")
	err := ks.sqlDB.Close()

	if err != nil {
		ks.node.Errorf("Failed closing keystore [%s].", err.Error())
	} else {
		ks.node.Debug("Closing keystore...done!")
	}

	ks.isOpen = false
	return err
}
+
+func (ks *keyStore) createKeyStoreIfNotExists() error {
+ // Check keystore directory
+ ksPath := ks.node.conf.getKeyStorePath()
+ missing, err := utils.DirMissingOrEmpty(ksPath)
+ ks.node.Debugf("Keystore path [%s] missing [%t]: [%s]", ksPath, missing, utils.ErrToString(err))
+
+ if !missing {
+ // Check keystore file
+ missing, err = utils.FileMissing(ks.node.conf.getKeyStorePath(), ks.node.conf.getKeyStoreFilename())
+ ks.node.Debugf("Keystore [%s] missing [%t]:[%s]", ks.node.conf.getKeyStoreFilePath(), missing, utils.ErrToString(err))
+ }
+
+ if missing {
+ err := ks.createKeyStore()
+ if err != nil {
+ ks.node.Errorf("Failed creating db At [%s]: [%s]", ks.node.conf.getKeyStoreFilePath(), err.Error())
+ return nil
+ }
+ }
+
+ return nil
+}
+
+func (ks *keyStore) createKeyStore() error {
+ // Create keystore directory root if it doesn't exist yet
+ ksPath := ks.node.conf.getKeyStorePath()
+ ks.node.Debugf("Creating Keystore at [%s]...", ksPath)
+
+ missing, err := utils.FileMissing(ksPath, ks.node.conf.getKeyStoreFilename())
+ if !missing {
+ ks.node.Debugf("Creating Keystore at [%s]. Keystore already there", ksPath)
+ return nil
+ }
+
+ os.MkdirAll(ksPath, 0755)
+
+ // Create Raw material folder
+ os.MkdirAll(ks.node.conf.getRawsPath(), 0755)
+
+ // Create DB
+ ks.node.Debug("Open Keystore DB...")
+ db, err := sql.Open("sqlite3", filepath.Join(ksPath, ks.node.conf.getKeyStoreFilename()))
+ if err != nil {
+ return err
+ }
+
+ ks.node.Debug("Ping Keystore DB...")
+ err = db.Ping()
+ if err != nil {
+ ks.node.Errorf("Failend pinged keystore DB: [%s]", err)
+
+ return err
+ }
+ defer db.Close()
+
+ ks.node.Debugf("Keystore created at [%s].", ksPath)
+ return nil
+}
+
// deleteKeyStore removes the whole keystore directory tree from disk.
func (ks *keyStore) deleteKeyStore() error {
	ks.node.Debugf("Removing KeyStore at [%s].", ks.node.conf.getKeyStorePath())

	return os.RemoveAll(ks.node.conf.getKeyStorePath())
}
+
+func (ks *keyStore) openKeyStore() error {
+ if ks.isOpen {
+ return nil
+ }
+
+ // Open DB
+ ksPath := ks.node.conf.getKeyStorePath()
+
+ sqlDB, err := sql.Open("sqlite3", filepath.Join(ksPath, ks.node.conf.getKeyStoreFilename()))
+ if err != nil {
+ ks.node.Errorf("Error opening keystore%s", err.Error())
+ return err
+ }
+ ks.isOpen = true
+ ks.sqlDB = sqlDB
+
+ ks.node.Debugf("Keystore opened at [%s]...done", ksPath)
+
+ return nil
+}
diff --git a/core/crypto/node_log.go b/core/crypto/node_log.go
new file mode 100644
index 00000000000..36c06250c87
--- /dev/null
+++ b/core/crypto/node_log.go
@@ -0,0 +1,53 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
// prependPrefix puts the node's configured log prefix in front of args so
// that the variadic log wrappers carry the same prefix as the formatted
// ones.
func (node *nodeImpl) prependPrefix(args []interface{}) []interface{} {
	return append([]interface{}{node.conf.logPrefix}, args...)
}

// Infof logs at INFO level with the node's log prefix prepended to format.
func (node *nodeImpl) Infof(format string, args ...interface{}) {
	log.Infof(node.conf.logPrefix+format, args...)
}

// Info logs at INFO level with the node's log prefix prepended.
func (node *nodeImpl) Info(args ...interface{}) {
	log.Info(node.prependPrefix(args)...)
}

// Debugf logs at DEBUG level with the node's log prefix prepended to format.
func (node *nodeImpl) Debugf(format string, args ...interface{}) {
	log.Debugf(node.conf.logPrefix+format, args...)
}

// Debug logs at DEBUG level with the node's log prefix prepended.
func (node *nodeImpl) Debug(args ...interface{}) {
	log.Debug(node.prependPrefix(args)...)
}

// Errorf logs at ERROR level with the node's log prefix prepended to format.
func (node *nodeImpl) Errorf(format string, args ...interface{}) {
	log.Errorf(node.conf.logPrefix+format, args...)
}

// Error logs at ERROR level with the node's log prefix prepended.
func (node *nodeImpl) Error(args ...interface{}) {
	log.Error(node.prependPrefix(args)...)
}

// Warningf logs at WARNING level with the node's log prefix prepended to format.
func (node *nodeImpl) Warningf(format string, args ...interface{}) {
	log.Warningf(node.conf.logPrefix+format, args...)
}

// Warning logs at WARNING level with the node's log prefix prepended.
func (node *nodeImpl) Warning(args ...interface{}) {
	log.Warning(node.prependPrefix(args)...)
}
diff --git a/core/crypto/node_sign.go b/core/crypto/node_sign.go
new file mode 100644
index 00000000000..9b45aa1ac99
--- /dev/null
+++ b/core/crypto/node_sign.go
@@ -0,0 +1,43 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "math/big"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+)
+
// sign produces an ECDSA signature of msg using the given signing key.
func (node *nodeImpl) sign(signKey interface{}, msg []byte) ([]byte, error) {
	return primitives.ECDSASign(signKey, msg)
}

// signWithEnrollmentKey signs msg with the node's enrollment private key.
func (node *nodeImpl) signWithEnrollmentKey(msg []byte) ([]byte, error) {
	return primitives.ECDSASign(node.enrollPrivKey, msg)
}

// ecdsaSignWithEnrollmentKey signs msg with the enrollment private key and
// returns the raw (r, s) signature components.
func (node *nodeImpl) ecdsaSignWithEnrollmentKey(msg []byte) (*big.Int, *big.Int, error) {
	return primitives.ECDSASignDirect(node.enrollPrivKey, msg)
}

// verify checks an ECDSA signature over msg with the given verification key.
func (node *nodeImpl) verify(verKey interface{}, msg, signature []byte) (bool, error) {
	return primitives.ECDSAVerify(verKey, msg, signature)
}

// verifyWithEnrollmentCert verifies signature over msg against the public
// key contained in the node's enrollment certificate.
func (node *nodeImpl) verifyWithEnrollmentCert(msg, signature []byte) (bool, error) {
	return primitives.ECDSAVerify(node.enrollCert.PublicKey, msg, signature)
}
diff --git a/core/crypto/node_tca.go b/core/crypto/node_tca.go
new file mode 100644
index 00000000000..38068944131
--- /dev/null
+++ b/core/crypto/node_tca.go
@@ -0,0 +1,126 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ membersrvc "github.com/hyperledger/fabric/membersrvc/protos"
+
+ "errors"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+)
+
+func (node *nodeImpl) retrieveTCACertsChain(userID string) error {
+ if !node.ks.certMissing(node.conf.getTCACertsChainFilename()) {
+ return nil
+ }
+
+ // Retrieve TCA certificate and verify it
+ tcaCertRaw, err := node.getTCACertificate()
+ if err != nil {
+ node.Errorf("Failed getting TCA certificate [%s].", err.Error())
+
+ return err
+ }
+ node.Debugf("TCA certificate [% x]", tcaCertRaw)
+
+ // TODO: Test TCA cert againt root CA
+ _, err = primitives.DERToX509Certificate(tcaCertRaw)
+ if err != nil {
+ node.Errorf("Failed parsing TCA certificate [%s].", err.Error())
+
+ return err
+ }
+
+ // Store TCA cert
+ node.Debugf("Storing TCA certificate for [%s]...", userID)
+
+ if err := node.ks.storeCert(node.conf.getTCACertsChainFilename(), tcaCertRaw); err != nil {
+ node.Errorf("Failed storing tca certificate [%s].", err.Error())
+ return err
+ }
+
+ return nil
+}
+
+func (node *nodeImpl) loadTCACertsChain() error {
+ // Load TCA certs chain
+ node.Debug("Loading TCA certificates chain...")
+
+ cert, err := node.ks.loadCert(node.conf.getTCACertsChainFilename())
+ if err != nil {
+ node.Errorf("Failed loading TCA certificates chain [%s].", err.Error())
+
+ return err
+ }
+
+ // Prepare ecaCertPool
+ ok := node.tcaCertPool.AppendCertsFromPEM(cert)
+ if !ok {
+ node.Error("Failed appending TCA certificates chain.")
+
+ return errors.New("Failed appending TCA certificates chain.")
+ }
+
+ return nil
+}
+
+func (node *nodeImpl) getTCAClient() (*grpc.ClientConn, membersrvc.TCAPClient, error) {
+ node.Debug("Getting TCA client...")
+
+ conn, err := node.getClientConn(node.conf.getTCAPAddr(), node.conf.getTCAServerName())
+ if err != nil {
+ node.Errorf("Failed getting client connection: [%s]", err)
+ }
+
+ client := membersrvc.NewTCAPClient(conn)
+
+ node.Debug("Getting TCA client...done")
+
+ return conn, client, nil
+}
+
+func (node *nodeImpl) callTCAReadCACertificate(ctx context.Context, opts ...grpc.CallOption) (*membersrvc.Cert, error) {
+ // Get a TCA Client
+ sock, tcaP, err := node.getTCAClient()
+ defer sock.Close()
+
+ // Issue the request
+ cert, err := tcaP.ReadCACertificate(ctx, &membersrvc.Empty{}, opts...)
+ if err != nil {
+ node.Errorf("Failed requesting tca read certificate [%s].", err.Error())
+
+ return nil, err
+ }
+
+ return cert, nil
+}
+
// getTCACertificate fetches the TCA root certificate (DER) from the TCA.
func (node *nodeImpl) getTCACertificate() ([]byte, error) {
	response, err := node.callTCAReadCACertificate(context.Background())
	if err != nil {
		node.Errorf("Failed requesting TCA certificate [%s].", err.Error())

		return nil, err
	}

	// TODO: check response.Cert against rootCA

	return response.Cert, nil
}
diff --git a/core/crypto/node_tlsca.go b/core/crypto/node_tlsca.go
new file mode 100644
index 00000000000..b7359d96ecf
--- /dev/null
+++ b/core/crypto/node_tlsca.go
@@ -0,0 +1,206 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ membersrvc "github.com/hyperledger/fabric/membersrvc/protos"
+
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/x509"
+ "errors"
+ "google/protobuf"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/util"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+)
+
+// retrieveTLSCertificate obtains a TLS key pair and certificate from the
+// TLSCA for this node and stores both in the keystore. It is a no-op when
+// a TLS certificate is already present.
+func (node *nodeImpl) retrieveTLSCertificate(id, affiliation string) error {
+	if !node.ks.certMissing(node.conf.getTLSCertFilename()) {
+		return nil
+	}
+
+	key, tlsCertRaw, err := node.getTLSCertificateFromTLSCA(id, affiliation)
+	if err != nil {
+		node.Errorf("Failed getting tls certificate [id=%s] %s", id, err)
+
+		return err
+	}
+	node.Debugf("TLS Cert [% x]", tlsCertRaw)
+
+	node.Debugf("Storing TLS key and certificate for user [%s]...", id)
+
+	// Store tls key.
+	if err := node.ks.storePrivateKeyInClear(node.conf.getTLSKeyFilename(), key); err != nil {
+		node.Errorf("Failed storing tls key [id=%s]: %s", id, err)
+		return err
+	}
+
+	// Store tls cert
+	if err := node.ks.storeCert(node.conf.getTLSCertFilename(), tlsCertRaw); err != nil {
+		node.Errorf("Failed storing tls certificate [id=%s]: %s", id, err)
+		return err
+	}
+
+	return nil
+}
+
+// deleteTLSCertificate removes this node's TLS key and certificate from the
+// keystore. The affiliation parameter is currently unused.
+func (node *nodeImpl) deleteTLSCertificate(id, affiliation string) error {
+	if err := node.ks.deletePrivateKeyInClear(node.conf.getTLSKeyFilename()); err != nil {
+		node.Errorf("Failed deleting tls key [id=%s]: %s", id, err)
+		return err
+	}
+
+	// Delete tls cert
+	if err := node.ks.deleteCert(node.conf.getTLSCertFilename()); err != nil {
+		node.Errorf("Failed deleting tls certificate [id=%s]: %s", id, err)
+		return err
+	}
+
+	return nil
+}
+
+// loadTLSCertificate loads this node's TLS certificate from the keystore
+// into node.tlsCert (the parsed x509 form; the DER bytes are discarded).
+func (node *nodeImpl) loadTLSCertificate() error {
+	node.Debug("Loading tls certificate...")
+
+	cert, _, err := node.ks.loadCertX509AndDer(node.conf.getTLSCertFilename())
+	if err != nil {
+		node.Errorf("Failed parsing tls certificate [%s].", err.Error())
+
+		return err
+	}
+	node.tlsCert = cert
+
+	return nil
+}
+
+// loadTLSCACertsChain loads the external TLSCA certificates chain into
+// node.tlsCertPool. When TLS is disabled it does nothing and returns nil.
+func (node *nodeImpl) loadTLSCACertsChain() error {
+	if node.conf.isTLSEnabled() {
+		node.Debug("Loading TLSCA certificates chain...")
+
+		pem, err := node.ks.loadExternalCert(node.conf.getTLSCACertsExternalPath())
+		if err != nil {
+			node.Errorf("Failed loading TLSCA certificates chain [%s].", err.Error())
+
+			return err
+		}
+
+		// AppendCertsFromPEM reports false when no certificate could be
+		// parsed out of the PEM bytes.
+		ok := node.tlsCertPool.AppendCertsFromPEM(pem)
+		if !ok {
+			node.Error("Failed appending TLSCA certificates chain.")
+
+			return errors.New("Failed appending TLSCA certificates chain.")
+		}
+
+		node.Debug("Loading TLSCA certificates chain...done")
+
+	} else {
+		node.Debug("TLS is disabled!!!")
+	}
+
+	return nil
+}
+
+func (node *nodeImpl) getTLSCertificateFromTLSCA(id, affiliation string) (interface{}, []byte, error) {
+ node.Debug("getTLSCertificate...")
+
+ priv, err := primitives.NewECDSAKey()
+
+ if err != nil {
+ node.Errorf("Failed generating key: %s", err)
+
+ return nil, nil, err
+ }
+
+ uuid := util.GenerateUUID()
+
+ // Prepare the request
+ pubraw, _ := x509.MarshalPKIXPublicKey(&priv.PublicKey)
+ now := time.Now()
+ timestamp := google_protobuf.Timestamp{Seconds: int64(now.Second()), Nanos: int32(now.Nanosecond())}
+
+ req := &membersrvc.TLSCertCreateReq{
+ Ts: ×tamp,
+ Id: &membersrvc.Identity{Id: id + "-" + uuid},
+ Pub: &membersrvc.PublicKey{
+ Type: membersrvc.CryptoType_ECDSA,
+ Key: pubraw,
+ }, Sig: nil}
+ rawreq, _ := proto.Marshal(req)
+ r, s, err := ecdsa.Sign(rand.Reader, priv, primitives.Hash(rawreq))
+ if err != nil {
+ panic(err)
+ }
+ R, _ := r.MarshalText()
+ S, _ := s.MarshalText()
+ req.Sig = &membersrvc.Signature{Type: membersrvc.CryptoType_ECDSA, R: R, S: S}
+
+ pbCert, err := node.callTLSCACreateCertificate(context.Background(), req)
+ if err != nil {
+ node.Errorf("Failed requesting tls certificate: %s", err)
+
+ return nil, nil, err
+ }
+
+ node.Debug("Verifing tls certificate...")
+
+ tlsCert, err := primitives.DERToX509Certificate(pbCert.Cert.Cert)
+ certPK := tlsCert.PublicKey.(*ecdsa.PublicKey)
+ primitives.VerifySignCapability(priv, certPK)
+
+ node.Debug("Verifing tls certificate...done!")
+
+ return priv, pbCert.Cert.Cert, nil
+}
+
+// getTLSCAClient dials the TLSCA and returns the gRPC connection together
+// with a TLSCAP client stub. The caller owns the connection and must
+// Close() it.
+func (node *nodeImpl) getTLSCAClient() (*grpc.ClientConn, membersrvc.TLSCAPClient, error) {
+	node.Debug("Getting TLSCA client...")
+
+	conn, err := node.getClientConn(node.conf.getTLSCAPAddr(), node.conf.getTLSCAServerName())
+	if err != nil {
+		node.Errorf("Failed getting client connection: [%s]", err)
+
+		// Propagate the failure. The original code dropped this error and
+		// returned nil, leaving callers with a nil connection.
+		return nil, nil, err
+	}
+
+	client := membersrvc.NewTLSCAPClient(conn)
+
+	node.Debug("Getting TLSCA client...done")
+
+	return conn, client, nil
+}
+
+// callTLSCACreateCertificate invokes CreateCertificate on the TLSCA with the
+// given request and returns the TLSCA's response.
+func (node *nodeImpl) callTLSCACreateCertificate(ctx context.Context, in *membersrvc.TLSCertCreateReq, opts ...grpc.CallOption) (*membersrvc.TLSCertCreateResp, error) {
+	conn, tlscaP, err := node.getTLSCAClient()
+	if err != nil {
+		node.Errorf("Failed dialing in: %s", err)
+
+		return nil, err
+	}
+	defer conn.Close()
+
+	resp, err := tlscaP.CreateCertificate(ctx, in, opts...)
+	if err != nil {
+		node.Errorf("Failed requesting tls certificate: %s", err)
+
+		return nil, err
+	}
+
+	return resp, nil
+}
diff --git a/core/crypto/peer.go b/core/crypto/peer.go
new file mode 100644
index 00000000000..691736fbb99
--- /dev/null
+++ b/core/crypto/peer.go
@@ -0,0 +1,160 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "sync"
+
+ "github.com/hyperledger/fabric/core/crypto/utils"
+)
+
+// Private types and variables
+
+type peerEntry struct {
+ peer Peer
+ counter int64
+}
+
+var (
+ // Map of initialized peers
+ peers = make(map[string]peerEntry)
+
+ // Sync
+ peerMutex sync.Mutex
+)
+
+// Public Methods
+
+// RegisterPeer registers a peer to the PKI infrastructure
+func RegisterPeer(name string, pwd []byte, enrollID, enrollPWD string) error {
+ peerMutex.Lock()
+ defer peerMutex.Unlock()
+
+ log.Infof("Registering peer [%s] with id [%s]...", enrollID, name)
+
+ if _, ok := peers[name]; ok {
+ log.Infof("Registering peer [%s] with id [%s]...done. Already initialized.", enrollID, name)
+
+ return nil
+ }
+
+ peer := newPeer()
+ if err := peer.register(NodePeer, name, pwd, enrollID, enrollPWD, nil); err != nil {
+ if err != utils.ErrAlreadyRegistered && err != utils.ErrAlreadyInitialized {
+ log.Errorf("Failed registering peer [%s] with id [%s] [%s].", enrollID, name, err)
+ return err
+ }
+ log.Infof("Registering peer [%s] with id [%s]...done. Already registered or initiliazed.", enrollID, name)
+ }
+ err := peer.close()
+ if err != nil {
+ // It is not necessary to report this error to the caller
+ log.Warningf("Registering peer [%s] with id [%s]. Failed closing [%s].", enrollID, name, err)
+ }
+
+ log.Infof("Registering peer [%s] with id [%s]...done!", enrollID, name)
+
+ return nil
+}
+
+// InitPeer initializes a peer named name with password pwd
+func InitPeer(name string, pwd []byte) (Peer, error) {
+ peerMutex.Lock()
+ defer peerMutex.Unlock()
+
+ log.Infof("Initializing peer [%s]...", name)
+
+ if entry, ok := peers[name]; ok {
+ log.Infof("Peer already initiliazied [%s]. Increasing counter from [%d]", name, peers[name].counter)
+ entry.counter++
+ peers[name] = entry
+
+ return peers[name].peer, nil
+ }
+
+ peer := newPeer()
+ if err := peer.init(NodePeer, name, pwd, nil); err != nil {
+ log.Errorf("Failed peer initialization [%s]: [%s]", name, err)
+
+ return nil, err
+ }
+
+ peers[name] = peerEntry{peer, 1}
+ log.Infof("Initializing peer [%s]...done!", name)
+
+ return peer, nil
+}
+
+// ClosePeer releases all the resources allocated by peers
+func ClosePeer(peer Peer) error {
+ peerMutex.Lock()
+ defer peerMutex.Unlock()
+
+ return closePeerInternal(peer, false)
+}
+
+// CloseAllPeers closes all the peers initialized so far
+func CloseAllPeers() (bool, []error) {
+ peerMutex.Lock()
+ defer peerMutex.Unlock()
+
+ log.Info("Closing all peers...")
+
+ errs := make([]error, len(peers))
+ for _, value := range peers {
+ err := closePeerInternal(value.peer, true)
+
+ errs = append(errs, err)
+ }
+
+ log.Info("Closing all peers...done!")
+
+ return len(errs) != 0, errs
+}
+
+// Private Methods
+
+// newPeer allocates an empty peerImpl with a fresh embedded nodeImpl.
+func newPeer() *peerImpl {
+	return &peerImpl{&nodeImpl{}, sync.RWMutex{}, nil}
+}
+
+// closePeerInternal decrements the reference counter of the given peer and
+// closes it (removing it from the registry) when the counter reaches one or
+// force is true. Callers must hold peerMutex.
+func closePeerInternal(peer Peer, force bool) error {
+	if peer == nil {
+		return utils.ErrNilArgument
+	}
+
+	name := peer.GetName()
+	log.Infof("Closing peer [%s]...", name)
+	entry, ok := peers[name]
+	if !ok {
+		return utils.ErrInvalidReference
+	}
+	if entry.counter == 1 || force {
+		// Deferred so the registry entry is removed after close completes.
+		defer delete(peers, name)
+		err := peers[name].peer.(*peerImpl).close()
+		log.Infof("Closing peer [%s]...done! [%s].", name, utils.ErrToString(err))
+
+		return err
+	}
+
+	// decrease counter (entry is a copy; write it back)
+	entry.counter--
+	peers[name] = entry
+	log.Infof("Closing peer [%s]...decreased counter at [%d].", name, peers[name].counter)
+
+	return nil
+}
diff --git a/core/crypto/peer_eca.go b/core/crypto/peer_eca.go
new file mode 100644
index 00000000000..dd713f642a9
--- /dev/null
+++ b/core/crypto/peer_eca.go
@@ -0,0 +1,121 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "crypto/x509"
+ "fmt"
+ "strconv"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+ membersrvc "github.com/hyperledger/fabric/membersrvc/protos"
+ "golang.org/x/net/context"
+)
+
+// getEnrollmentCert resolves the enrollment certificate for the given node
+// id. Lookup order: in-memory cache, then keystore DB, then the ECA (via
+// getEnrollmentCertByHashFromECA). Successful lookups are cached.
+func (peer *peerImpl) getEnrollmentCert(id []byte) (*x509.Certificate, error) {
+	if len(id) == 0 {
+		return nil, fmt.Errorf("Invalid peer id. It is empty.")
+	}
+
+	sid := utils.EncodeBase64(id)
+
+	peer.Debugf("Getting enrollment certificate for [%s]", sid)
+
+	if cert := peer.getNodeEnrollmentCertificate(sid); cert != nil {
+		peer.Debugf("Enrollment certificate for [%s] already in memory.", sid)
+		return cert, nil
+	}
+
+	// Retrieve from the DB or from the ECA in case
+	peer.Debugf("Retrieve Enrollment certificate for [%s]...", sid)
+	rawCert, err := peer.ks.GetSignEnrollmentCert(id, peer.getEnrollmentCertByHashFromECA)
+	if err != nil {
+		peer.Errorf("Failed getting enrollment certificate for [%s]: [%s]", sid, err)
+
+		return nil, err
+	}
+
+	cert, err := primitives.DERToX509Certificate(rawCert)
+	if err != nil {
+		peer.Errorf("Failed parsing enrollment certificate for [%s]: [% x],[% x]", sid, rawCert, err)
+
+		return nil, err
+	}
+
+	// Populate the in-memory cache for subsequent lookups.
+	peer.putNodeEnrollmentCertificate(sid, cert)
+
+	return cert, nil
+}
+
+// getEnrollmentCertByHashFromECA fetches the (sign, enc) enrollment
+// certificate pair for the given certificate hash from the ECA, and checks
+// that the signing certificate carries a VALIDATOR or PEER role.
+func (peer *peerImpl) getEnrollmentCertByHashFromECA(id []byte) ([]byte, []byte, error) {
+	// Prepare the request
+	peer.Debugf("Reading certificate for hash [% x]", id)
+
+	req := &membersrvc.Hash{Hash: id}
+	response, err := peer.callECAReadCertificateByHash(context.Background(), req)
+	if err != nil {
+		peer.Errorf("Failed requesting enrollment certificate [%s].", err.Error())
+
+		return nil, nil, err
+	}
+
+	peer.Debugf("Certificate for hash [% x] = [% x][% x]", id, response.Sign, response.Enc)
+
+	// Verify response.Sign
+	x509Cert, err := primitives.DERToX509Certificate(response.Sign)
+	if err != nil {
+		peer.Errorf("Failed parsing signing enrollment certificate for encrypting: [%s]", err)
+
+		return nil, nil, err
+	}
+
+	// Check role
+	roleRaw, err := primitives.GetCriticalExtension(x509Cert, ECertSubjectRole)
+	if err != nil {
+		peer.Errorf("Failed parsing ECertSubjectRole in enrollment certificate for signing: [%s]", err)
+
+		return nil, nil, err
+	}
+
+	// Parse as a 64-bit integer. The original used bitSize len(roleRaw)*8,
+	// which is invalid for ParseInt whenever the extension is longer than
+	// 8 bytes.
+	role, err := strconv.ParseInt(string(roleRaw), 10, 64)
+	if err != nil {
+		peer.Errorf("Failed parsing ECertSubjectRole in enrollment certificate for signing: [%s]", err)
+
+		return nil, nil, err
+	}
+
+	if membersrvc.Role(role) != membersrvc.Role_VALIDATOR && membersrvc.Role(role) != membersrvc.Role_PEER {
+		// The original returned err here, but err is nil on this path, so
+		// the invalid role was silently accepted by callers. Build a real
+		// error instead.
+		err = fmt.Errorf("Invalid ECertSubjectRole in enrollment certificate for signing. Not a validator or peer: [%d]", role)
+		peer.Errorf("%s", err)
+
+		return nil, nil, err
+	}
+
+	return response.Sign, response.Enc, nil
+}
+
+// getNodeEnrollmentCertificate returns the cached enrollment certificate for
+// sid, or nil when not cached. Safe for concurrent use (read lock).
+func (peer *peerImpl) getNodeEnrollmentCertificate(sid string) *x509.Certificate {
+	peer.nodeEnrollmentCertificatesMutex.RLock()
+	defer peer.nodeEnrollmentCertificatesMutex.RUnlock()
+	return peer.nodeEnrollmentCertificates[sid]
+}
+
+// putNodeEnrollmentCertificate caches the enrollment certificate for sid.
+// Safe for concurrent use (write lock).
+func (peer *peerImpl) putNodeEnrollmentCertificate(sid string, cert *x509.Certificate) {
+	peer.nodeEnrollmentCertificatesMutex.Lock()
+	defer peer.nodeEnrollmentCertificatesMutex.Unlock()
+	peer.nodeEnrollmentCertificates[sid] = cert
+}
diff --git a/core/crypto/peer_impl.go b/core/crypto/peer_impl.go
new file mode 100644
index 00000000000..e848b167528
--- /dev/null
+++ b/core/crypto/peer_impl.go
@@ -0,0 +1,234 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "crypto/ecdsa"
+ "crypto/x509"
+ "fmt"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+ obc "github.com/hyperledger/fabric/protos"
+)
+
+// peerImpl implements the Peer interface on top of the common nodeImpl,
+// adding a mutex-protected cache of enrollment certificates keyed by the
+// base64-encoded node id.
+type peerImpl struct {
+	*nodeImpl
+
+	// Guards nodeEnrollmentCertificates.
+	nodeEnrollmentCertificatesMutex sync.RWMutex
+	nodeEnrollmentCertificates      map[string]*x509.Certificate
+}
+
+// Public methods
+
+// GetID returns a defensive copy of this peer's identifier, so callers
+// cannot mutate the internal id.
+func (peer *peerImpl) GetID() []byte {
+	return utils.Clone(peer.id)
+}
+
+// GetEnrollmentID returns this peer's enrollment id.
+func (peer *peerImpl) GetEnrollmentID() string {
+	return peer.enrollID
+}
+
+// TransactionPreValidation verifies that the transaction is
+// well formed with the respect to the security layer
+// prescriptions (i.e. signature verification).
+// The transaction certificate is checked against the TCA cert pool first,
+// then against the ECA cert pool; the signature is verified over the
+// marshalled transaction with its Signature field temporarily cleared.
+// NOTE(review): tx is mutated in place while re-marshalling (Signature is
+// nulled and restored), so the same *Transaction must not be validated
+// concurrently — confirm against callers.
+func (peer *peerImpl) TransactionPreValidation(tx *obc.Transaction) (*obc.Transaction, error) {
+	if !peer.IsInitialized() {
+		return nil, utils.ErrNotInitialized
+	}
+
+	// peer.debug("Pre validating [%s].", tx.String())
+	peer.Debugf("Tx confdential level [%s].", tx.ConfidentialityLevel.String())
+
+	if tx.Cert != nil && tx.Signature != nil {
+		// Verify the transaction
+		// 1. Unmarshal cert
+		cert, err := primitives.DERToX509Certificate(tx.Cert)
+		if err != nil {
+			peer.Errorf("TransactionPreExecution: failed unmarshalling cert [%s].", err.Error())
+			return tx, err
+		}
+
+		// Verify transaction certificate against root
+		// DER to x509
+		// NOTE(review): this parses tx.Cert a second time; cert above could
+		// be reused.
+		x509Cert, err := primitives.DERToX509Certificate(tx.Cert)
+		if err != nil {
+			peer.Debugf("Failed parsing certificate [% x]: [%s].", tx.Cert, err)
+
+			return tx, err
+		}
+
+		// 1. Get rid of the extensions that cannot be checked now
+		x509Cert.UnhandledCriticalExtensions = nil
+		// 2. Check against TCA certPool
+		if _, err = primitives.CheckCertAgainRoot(x509Cert, peer.tcaCertPool); err != nil {
+			peer.Warningf("Failed verifing certificate against TCA cert pool [%s].", err.Error())
+			// 3. Check against ECA certPool, if this check also fails then return an error
+			if _, err = primitives.CheckCertAgainRoot(x509Cert, peer.ecaCertPool); err != nil {
+				peer.Warningf("Failed verifing certificate against ECA cert pool [%s].", err.Error())
+
+				return tx, fmt.Errorf("Certificate has not been signed by a trusted authority. [%s]", err)
+			}
+		}
+
+		// 3. Marshall tx without signature
+		signature := tx.Signature
+		tx.Signature = nil
+		rawTx, err := proto.Marshal(tx)
+		if err != nil {
+			peer.Errorf("TransactionPreExecution: failed marshaling tx [%s].", err.Error())
+			return tx, err
+		}
+		tx.Signature = signature
+
+		// 2. Verify signature
+		ok, err := peer.verify(cert.PublicKey, rawTx, tx.Signature)
+		if err != nil {
+			peer.Errorf("TransactionPreExecution: failed marshaling tx [%s].", err.Error())
+			return tx, err
+		}
+
+		if !ok {
+			return tx, utils.ErrInvalidTransactionSignature
+		}
+	} else {
+		if tx.Cert == nil {
+			return tx, utils.ErrTransactionCertificate
+		}
+
+		if tx.Signature == nil {
+			return tx, utils.ErrTransactionSignature
+		}
+	}
+
+	return tx, nil
+}
+
+// TransactionPreExecution prepares a validated transaction for execution.
+// Not implemented for plain peers (validators override this behavior);
+// always returns utils.ErrNotImplemented.
+func (peer *peerImpl) TransactionPreExecution(tx *obc.Transaction) (*obc.Transaction, error) {
+	return nil, utils.ErrNotImplemented
+}
+
+// Sign signs msg with this peer's enrollment key and outputs
+// the signature if no error occurred.
+func (peer *peerImpl) Sign(msg []byte) ([]byte, error) {
+	return peer.signWithEnrollmentKey(msg)
+}
+
+// Verify checks that signature is a valid signature of message under vkID's
+// verification key (resolved via the node's enrollment certificate).
+// Returns nil when verification succeeds.
+func (peer *peerImpl) Verify(vkID, signature, message []byte) error {
+	if len(vkID) == 0 {
+		return fmt.Errorf("Invalid peer id. It is empty.")
+	}
+	if len(signature) == 0 {
+		return fmt.Errorf("Invalid signature. It is empty.")
+	}
+	if len(message) == 0 {
+		return fmt.Errorf("Invalid message. It is empty.")
+	}
+
+	cert, err := peer.getEnrollmentCert(vkID)
+	if err != nil {
+		peer.Errorf("Failed getting enrollment cert for [% x]: [%s]", vkID, err)
+
+		return err
+	}
+
+	// Guard the type assertion: the original asserted unconditionally and
+	// would panic on a certificate carrying a non-ECDSA public key.
+	vk, ok := cert.PublicKey.(*ecdsa.PublicKey)
+	if !ok {
+		peer.Errorf("Enrollment cert for [% x] does not carry an ECDSA public key", vkID)
+
+		return fmt.Errorf("Invalid enrollment certificate. Public key is not ECDSA.")
+	}
+
+	ok, err = peer.verify(vk, message, signature)
+	if err != nil {
+		peer.Errorf("Failed verifying signature for [% x]: [%s]", vkID, err)
+
+		return err
+	}
+
+	if !ok {
+		peer.Errorf("Failed invalid signature for [% x]", vkID)
+
+		return utils.ErrInvalidSignature
+	}
+
+	return nil
+}
+
+// GetStateEncryptor is not supported on plain peers; always returns
+// utils.ErrNotImplemented.
+func (peer *peerImpl) GetStateEncryptor(deployTx, invokeTx *obc.Transaction) (StateEncryptor, error) {
+	return nil, utils.ErrNotImplemented
+}
+
+// GetTransactionBinding derives the transaction binding as the hash of the
+// transaction certificate concatenated with its nonce.
+func (peer *peerImpl) GetTransactionBinding(tx *obc.Transaction) ([]byte, error) {
+	return primitives.Hash(append(tx.Cert, tx.Nonce...)), nil
+}
+
+// Private methods
+
+// register delegates peer registration to the embedded nodeImpl, adding
+// peer-scoped error logging.
+func (peer *peerImpl) register(eType NodeType, name string, pwd []byte, enrollID, enrollPWD string, regFunc registerFunc) error {
+
+	if err := peer.nodeImpl.register(eType, name, pwd, enrollID, enrollPWD, regFunc); err != nil {
+		peer.Errorf("Failed registering peer [%s]: [%s]", enrollID, err)
+		return err
+	}
+
+	return nil
+}
+
+// init initializes the peer: it runs the embedded nodeImpl initialization
+// with a peer-specific hook that sets up the keystore tables and the
+// enrollment-certificate cache, then chains any caller-provided initFunc.
+func (peer *peerImpl) init(eType NodeType, id string, pwd []byte, initFunc initalizationFunc) error {
+
+	peerInitFunc := func(eType NodeType, name string, pwd []byte) error {
+		// Initialize keystore
+		peer.Debug("Init keystore...")
+		err := peer.initKeyStore()
+		if err != nil {
+			// The original condition was inverted: genuine failures were
+			// logged as "already initialized" and swallowed, while the
+			// benign ErrKeyStoreAlreadyInitialized aborted initialization.
+			if err == utils.ErrKeyStoreAlreadyInitialized {
+				// Benign: keystore was set up by a previous run.
+				peer.Error("Keystore already initialized.")
+			} else {
+				peer.Errorf("Failed initiliazing keystore [%s].", err)
+
+				return err
+			}
+		}
+		peer.Debug("Init keystore...done.")
+
+		// EnrollCerts
+		peer.nodeEnrollmentCertificates = make(map[string]*x509.Certificate)
+
+		if initFunc != nil {
+			return initFunc(eType, id, pwd)
+		}
+
+		return nil
+	}
+
+	if err := peer.nodeImpl.init(eType, id, pwd, peerInitFunc); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// close releases the resources held by the embedded nodeImpl.
+func (peer *peerImpl) close() error {
+	return peer.nodeImpl.close()
+}
diff --git a/core/crypto/peer_ks.go b/core/crypto/peer_ks.go
new file mode 100644
index 00000000000..748290916a6
--- /dev/null
+++ b/core/crypto/peer_ks.go
@@ -0,0 +1,133 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "database/sql"
+ "fmt"
+
+ "github.com/hyperledger/fabric/core/crypto/utils"
+)
+
+// initKeyStore creates the peer-specific keystore schema: the Certificates
+// table mapping a node id to its signing and encryption certificates.
+func (peer *peerImpl) initKeyStore() error {
+	// create tables
+	peer.Debugf("Create Table [%s] if not exists", "Certificates")
+	if _, err := peer.ks.sqlDB.Exec("CREATE TABLE IF NOT EXISTS Certificates (id VARCHAR, certsign BLOB, certenc BLOB, PRIMARY KEY (id))"); err != nil {
+		peer.Errorf("Failed creating table [%s].", err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// GetSignEnrollmentCert returns the signing enrollment certificate for id.
+// It first consults the local Certificates table; on a miss it fetches the
+// (sign, enc) pair via certFetcher (typically from the ECA), persists it,
+// and re-reads it from the DB. Only the signing certificate is returned.
+func (ks *keyStore) GetSignEnrollmentCert(id []byte, certFetcher func(id []byte) ([]byte, []byte, error)) ([]byte, error) {
+	if len(id) == 0 {
+		return nil, fmt.Errorf("Invalid peer id. It is empty.")
+	}
+
+	// Serialize DB access across goroutines.
+	ks.m.Lock()
+	defer ks.m.Unlock()
+
+	sid := utils.EncodeBase64(id)
+
+	certSign, certEnc, err := ks.selectSignEnrollmentCert(sid)
+	if err != nil {
+		ks.node.Errorf("Failed selecting enrollment cert [%s].", err.Error())
+
+		return nil, err
+	}
+
+	// certSign == nil with a nil error means "not found" (see
+	// selectSignEnrollmentCert).
+	if certSign == nil {
+		ks.node.Debugf("Cert for [%s] not available. Fetching from ECA....", sid)
+
+		// If No cert is available, fetch from ECA
+
+		// 1. Fetch
+		ks.node.Debug("Fectch Enrollment Certificate from ECA...")
+		certSign, certEnc, err = certFetcher(id)
+		if err != nil {
+			return nil, err
+		}
+
+		// 2. Store
+		ks.node.Debug("Store certificate...")
+		tx, err := ks.sqlDB.Begin()
+		if err != nil {
+			ks.node.Errorf("Failed beginning transaction [%s].", err.Error())
+
+			return nil, err
+		}
+
+		ks.node.Debugf("Insert id [%s].", sid)
+		ks.node.Debugf("Insert cert [% x].", certSign)
+
+		_, err = tx.Exec("INSERT INTO Certificates (id, certsign, certenc) VALUES (?, ?, ?)", sid, certSign, certEnc)
+
+		if err != nil {
+			ks.node.Errorf("Failed inserting cert [%s].", err.Error())
+
+			tx.Rollback()
+
+			return nil, err
+		}
+
+		err = tx.Commit()
+		if err != nil {
+			ks.node.Errorf("Failed committing transaction [%s].", err.Error())
+
+			tx.Rollback()
+
+			return nil, err
+		}
+
+		ks.node.Debug("Fectch Enrollment Certificate from ECA...done!")
+
+		// Re-read what was just persisted so the returned bytes come from
+		// the DB path in both the hit and miss cases.
+		certSign, certEnc, err = ks.selectSignEnrollmentCert(sid)
+		if err != nil {
+			ks.node.Errorf("Failed selecting next TCert after fetching [%s].", err.Error())
+
+			return nil, err
+		}
+	}
+
+	ks.node.Debugf("Cert for [%s] = [% x]", sid, certSign)
+
+	return certSign, nil
+}
+
+// selectSignEnrollmentCert looks up the signing certificate for id in the
+// Certificates table. A missing row is not an error: it returns
+// (nil, nil, nil) so callers can distinguish "not found" from failure.
+// The second return value (enc cert) is always nil in this implementation.
+func (ks *keyStore) selectSignEnrollmentCert(id string) ([]byte, []byte, error) {
+	ks.node.Debugf("Select Sign Enrollment Cert for id [%s]", id)
+
+	// Get the first row available
+	var cert []byte
+	row := ks.sqlDB.QueryRow("SELECT certsign FROM Certificates where id = ?", id)
+	err := row.Scan(&cert)
+
+	if err == sql.ErrNoRows {
+		return nil, nil, nil
+	} else if err != nil {
+		ks.node.Errorf("Error during select [%s].", err.Error())
+
+		return nil, nil, err
+	}
+
+	ks.node.Debugf("Cert [% x].", cert)
+
+	ks.node.Debug("Select Enrollment Cert...done!")
+
+	return cert, nil, nil
+}
diff --git a/core/crypto/primitives/aes.go b/core/crypto/primitives/aes.go
new file mode 100644
index 00000000000..0cd0ab19c49
--- /dev/null
+++ b/core/crypto/primitives/aes.go
@@ -0,0 +1,154 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package primitives
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+)
+
+const (
+ // AESKeyLength is the default AES key length
+ AESKeyLength = 32
+
+ // NonceSize is the default NonceSize
+ NonceSize = 24
+)
+
+// GenAESKey returns a random AES key of length AESKeyLength (32 bytes,
+// i.e. AES-256) drawn from the platform CSPRNG.
+func GenAESKey() ([]byte, error) {
+	return GetRandomBytes(AESKeyLength)
+}
+
+// PKCS7Padding pads as prescribed by the PKCS7 standard: between 1 and
+// aes.BlockSize bytes are appended, each equal to the pad length, so an
+// already block-aligned input gains a full block of padding.
+func PKCS7Padding(src []byte) []byte {
+	padding := aes.BlockSize - len(src)%aes.BlockSize
+	padtext := bytes.Repeat([]byte{byte(padding)}, padding)
+	return append(src, padtext...)
+}
+
+// PKCS7UnPadding unpads as prescribed by the PKCS7 standard. It validates
+// that every padding byte equals the declared pad length and returns an
+// error on malformed (or empty) input.
+func PKCS7UnPadding(src []byte) ([]byte, error) {
+	length := len(src)
+	// Guard against empty input: the original indexed src[length-1]
+	// unconditionally, panicking on a zero-length slice.
+	if length == 0 {
+		return nil, fmt.Errorf("invalid padding")
+	}
+	unpadding := int(src[length-1])
+
+	if unpadding > aes.BlockSize || unpadding == 0 {
+		return nil, fmt.Errorf("invalid padding")
+	}
+
+	// A pad length larger than the input cannot be valid either.
+	if unpadding > length {
+		return nil, fmt.Errorf("invalid padding")
+	}
+
+	// Every padding byte must carry the pad length value.
+	pad := src[len(src)-unpadding:]
+	for i := 0; i < unpadding; i++ {
+		if pad[i] != byte(unpadding) {
+			return nil, fmt.Errorf("invalid padding")
+		}
+	}
+
+	return src[:(length - unpadding)], nil
+}
+
+// CBCEncrypt encrypts using CBC mode. The plaintext s must already be a
+// multiple of aes.BlockSize (use CBCPKCS7Encrypt otherwise). A random IV is
+// generated per call and prepended to the returned ciphertext.
+func CBCEncrypt(key, s []byte) ([]byte, error) {
+	// CBC mode works on blocks so plaintexts may need to be padded to the
+	// next whole block. For an example of such padding, see
+	// https://tools.ietf.org/html/rfc5246#section-6.2.3.2. Here we'll
+	// assume that the plaintext is already of the correct length.
+	if len(s)%aes.BlockSize != 0 {
+		return nil, errors.New("plaintext is not a multiple of the block size")
+	}
+
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+
+	// The IV needs to be unique, but not secure. Therefore it's common to
+	// include it at the beginning of the ciphertext.
+	ciphertext := make([]byte, aes.BlockSize+len(s))
+	iv := ciphertext[:aes.BlockSize]
+	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
+		return nil, err
+	}
+
+	mode := cipher.NewCBCEncrypter(block, iv)
+	mode.CryptBlocks(ciphertext[aes.BlockSize:], s)
+
+	// It's important to remember that ciphertexts must be authenticated
+	// (i.e. by using crypto/hmac) as well as being encrypted in order to
+	// be secure.
+	return ciphertext, nil
+}
+
+// CBCDecrypt decrypts using CBC mode. It expects the IV as the first
+// aes.BlockSize bytes of src (the layout produced by CBCEncrypt) and
+// decrypts in place, returning a slice aliasing src.
+func CBCDecrypt(key, src []byte) ([]byte, error) {
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+
+	// The IV needs to be unique, but not secure. Therefore it's common to
+	// include it at the beginning of the ciphertext.
+	if len(src) < aes.BlockSize {
+		return nil, errors.New("ciphertext too short")
+	}
+	iv := src[:aes.BlockSize]
+	src = src[aes.BlockSize:]
+
+	// CBC mode always works in whole blocks.
+	if len(src)%aes.BlockSize != 0 {
+		return nil, errors.New("ciphertext is not a multiple of the block size")
+	}
+
+	mode := cipher.NewCBCDecrypter(block, iv)
+
+	// CryptBlocks can work in-place if the two arguments are the same.
+	mode.CryptBlocks(src, src)
+
+	// If the original plaintext lengths are not a multiple of the block
+	// size, padding would have to be added when encrypting, which would be
+	// removed at this point. For an example, see
+	// https://tools.ietf.org/html/rfc5246#section-6.2.3.2. However, it's
+	// critical to note that ciphertexts must be authenticated (i.e. by
+	// using crypto/hmac) before being decrypted in order to avoid creating
+	// a padding oracle.
+
+	return src, nil
+}
+
+// CBCPKCS7Encrypt combines CBC encryption and PKCS7 padding: src is padded
+// to a whole number of blocks, then encrypted with a fresh random IV.
+func CBCPKCS7Encrypt(key, src []byte) ([]byte, error) {
+	return CBCEncrypt(key, PKCS7Padding(src))
+}
+
+// CBCPKCS7Decrypt combines CBC decryption and PKCS7 unpadding; it is the
+// inverse of CBCPKCS7Encrypt.
+func CBCPKCS7Decrypt(key, src []byte) ([]byte, error) {
+	pt, err := CBCDecrypt(key, src)
+	if err != nil {
+		return nil, err
+	}
+
+	original, err := PKCS7UnPadding(pt)
+	if err != nil {
+		return nil, err
+	}
+
+	return original, nil
+}
diff --git a/core/crypto/primitives/aes_test.go b/core/crypto/primitives/aes_test.go
new file mode 100644
index 00000000000..315e02fe1b7
--- /dev/null
+++ b/core/crypto/primitives/aes_test.go
@@ -0,0 +1,481 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This package contains unit-tests for the
+// github.com/hyperledger/fabric/core/crypto/primitives package
+package primitives_test
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/rand"
+ "math/big"
+ "testing"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+)
+
+// TestCBCPKCS7EncryptCBCPKCS7Decrypt encrypts using CBCPKCS7Encrypt and decrypts using CBCPKCS7Decrypt.
+func TestCBCPKCS7EncryptCBCPKCS7Decrypt(t *testing.T) {
+
+	// Note: The purpose of this test is not to test AES-256 in CBC mode's strength
+	// ... but rather to verify the code wrapping/unwrapping the cipher.
+	// NOTE(review): the error from rand.Reader.Read is ignored here; on a
+	// failed read the key would be all zeros, which is still a valid test key.
+	key := make([]byte, primitives.AESKeyLength)
+	rand.Reader.Read(key)
+
+	// A plaintext that is deliberately not a multiple of aes.BlockSize.
+	// 123456789012345678901234567890123456789012
+	var ptext = []byte("a message with arbitrary length (42 bytes)")
+
+	encrypted, encErr := primitives.CBCPKCS7Encrypt(key, ptext)
+	if encErr != nil {
+		t.Fatalf("Error encrypting '%s': %s", ptext, encErr)
+	}
+
+	decrypted, dErr := primitives.CBCPKCS7Decrypt(key, encrypted)
+	if dErr != nil {
+		t.Fatalf("Error decrypting the encrypted '%s': %v", ptext, dErr)
+	}
+
+	// Round-trip property: Decrypt(Encrypt(ptext)) == ptext.
+	if string(ptext[:]) != string(decrypted[:]) {
+		t.Fatal("Decrypt( Encrypt( ptext ) ) != ptext: Ciphertext decryption with the same key must result in the original plaintext!")
+	}
+
+}
+
+// TestPKCS7Padding verifies the PKCS#7 padding, using a human readable plaintext.
+// Cases: empty input, 1 and 2 byte inputs, 3..BlockSize-1 byte inputs, and a
+// full-block input (which must gain an entire extra block of padding).
+func TestPKCS7Padding(t *testing.T) {
+
+	// 0 byte/length ptext
+	ptext := []byte("")
+	expected := []byte{16, 16, 16, 16,
+		16, 16, 16, 16,
+		16, 16, 16, 16,
+		16, 16, 16, 16}
+	result := primitives.PKCS7Padding(ptext)
+
+	if !bytes.Equal(expected, result) {
+		t.Fatal("Padding error! Expected: '", expected, "', received: '", result, "'")
+	}
+
+	// 1 byte/length ptext
+	ptext = []byte("1")
+	expected = []byte{'1', 15, 15, 15,
+		15, 15, 15, 15,
+		15, 15, 15, 15,
+		15, 15, 15, 15}
+	result = primitives.PKCS7Padding(ptext)
+
+	if !bytes.Equal(expected, result) {
+		t.Fatal("Padding error! Expected: '", expected, "', received: '", result, "'")
+	}
+
+	// 2 byte/length ptext
+	ptext = []byte("12")
+	expected = []byte{'1', '2', 14, 14,
+		14, 14, 14, 14,
+		14, 14, 14, 14,
+		14, 14, 14, 14}
+	result = primitives.PKCS7Padding(ptext)
+
+	if !bytes.Equal(expected, result) {
+		t.Fatal("Padding error! Expected: '", expected, "', received: '", result, "'")
+	}
+
+	// 3 to aes.BlockSize-1 byte plaintext
+	ptext = []byte("1234567890ABCDEF")
+	for i := 3; i < aes.BlockSize; i++ {
+		result := primitives.PKCS7Padding(ptext[:i])
+
+		padding := aes.BlockSize - i
+		expectedPadding := bytes.Repeat([]byte{byte(padding)}, padding)
+		// Bug fix: build 'expected' in a fresh slice. Appending directly to
+		// ptext[:i] wrote the padding bytes into ptext's backing array,
+		// corrupting the plaintext used by subsequent loop iterations.
+		expected = append(append([]byte{}, ptext[:i]...), expectedPadding...)
+
+		if !bytes.Equal(result, expected) {
+			t.Fatal("Padding error! Expected: '", expected, "', received: '", result, "'")
+		}
+
+	}
+
+	// aes.BlockSize length ptext: a whole extra block of padding is added.
+	ptext = bytes.Repeat([]byte{byte('x')}, aes.BlockSize)
+	result = primitives.PKCS7Padding(ptext)
+
+	expectedPadding := bytes.Repeat([]byte{byte(aes.BlockSize)}, aes.BlockSize)
+	expected = append(append([]byte{}, ptext...), expectedPadding...)
+
+	if len(result) != 2*aes.BlockSize {
+		t.Fatal("Padding error: expected the length of the returned slice to be 2 times aes.BlockSize")
+	}
+
+	if !bytes.Equal(expected, result) {
+		t.Fatal("Padding error! Expected: '", expected, "', received: '", result, "'")
+	}
+
+}
+
+// TestPKCS7UnPadding verifies the PKCS#7 unpadding, using a human readable plaintext.
+// Mirrors TestPKCS7Padding: empty, 1-byte, 2-byte, 3..BlockSize-1 byte and
+// full-block plaintexts. Unpadding errors are now surfaced instead of discarded.
+func TestPKCS7UnPadding(t *testing.T) {
+
+	// 0 byte/length ptext
+	expected := []byte("")
+	ptext := []byte{16, 16, 16, 16,
+		16, 16, 16, 16,
+		16, 16, 16, 16,
+		16, 16, 16, 16}
+
+	result, err := primitives.PKCS7UnPadding(ptext)
+	if err != nil {
+		t.Fatal("UnPadding error: ", err)
+	}
+
+	if !bytes.Equal(expected, result) {
+		t.Fatal("UnPadding error! Expected: '", expected, "', received: '", result, "'")
+	}
+
+	// 1 byte/length ptext
+	expected = []byte("1")
+	ptext = []byte{'1', 15, 15, 15,
+		15, 15, 15, 15,
+		15, 15, 15, 15,
+		15, 15, 15, 15}
+
+	result, err = primitives.PKCS7UnPadding(ptext)
+	if err != nil {
+		t.Fatal("UnPadding error: ", err)
+	}
+
+	if !bytes.Equal(expected, result) {
+		t.Fatal("UnPadding error! Expected: '", expected, "', received: '", result, "'")
+	}
+
+	// 2 byte/length ptext
+	expected = []byte("12")
+	ptext = []byte{'1', '2', 14, 14,
+		14, 14, 14, 14,
+		14, 14, 14, 14,
+		14, 14, 14, 14}
+
+	result, err = primitives.PKCS7UnPadding(ptext)
+	if err != nil {
+		t.Fatal("UnPadding error: ", err)
+	}
+
+	if !bytes.Equal(expected, result) {
+		t.Fatal("UnPadding error! Expected: '", expected, "', received: '", result, "'")
+	}
+
+	// 3 to aes.BlockSize-1 byte plaintext
+	base := []byte("1234567890ABCDEF")
+	for i := 3; i < aes.BlockSize; i++ {
+		iPad := aes.BlockSize - i
+		padding := bytes.Repeat([]byte{byte(iPad)}, iPad)
+		// Bug fix: build the padded input in a fresh slice. Appending to
+		// base[:i] wrote the padding bytes into base's backing array,
+		// corrupting the plaintext used by subsequent loop iterations.
+		ptext = append(append([]byte{}, base[:i]...), padding...)
+
+		expected := base[:i]
+		result, err := primitives.PKCS7UnPadding(ptext)
+		if err != nil {
+			t.Fatal("UnPadding error: ", err)
+		}
+
+		if !bytes.Equal(result, expected) {
+			t.Fatal("UnPadding error! Expected: '", expected, "', received: '", result, "'")
+		}
+
+	}
+
+	// aes.BlockSize length ptext with a full extra block of padding.
+	expected = bytes.Repeat([]byte{byte('x')}, aes.BlockSize)
+	padding := bytes.Repeat([]byte{byte(aes.BlockSize)}, aes.BlockSize)
+	ptext = append(append([]byte{}, expected...), padding...)
+
+	result, err = primitives.PKCS7UnPadding(ptext)
+	if err != nil {
+		t.Fatal("UnPadding error: ", err)
+	}
+
+	if !bytes.Equal(expected, result) {
+		t.Fatal("UnPadding error! Expected: '", expected, "', received: '", result, "'")
+	}
+}
+
+// TestCBCEncryptCBCPKCS7Decrypt_BlockSizeLengthPlaintext verifies that CBCPKCS7Decrypt returns an error
+// when attempting to decrypt ciphertext of an irreproducible length.
+func TestCBCEncryptCBCPKCS7Decrypt_BlockSizeLengthPlaintext(t *testing.T) {
+
+	// One of the purposes of this test is to also document and clarify the expected behavior, i.e., that an extra
+	// block is appended to the message at the padding stage, as per the spec of PKCS#7 v1.5 [see RFC-2315 p.21]
+	key := make([]byte, primitives.AESKeyLength)
+	rand.Reader.Read(key)
+
+	// Exactly one block of plaintext, encrypted WITHOUT padding.
+	// 1234567890123456
+	var ptext = []byte("a 16 byte messag")
+
+	encrypted, encErr := primitives.CBCEncrypt(key, ptext)
+	if encErr != nil {
+		t.Fatalf("Error encrypting '%s': %v", ptext, encErr)
+	}
+
+	decrypted, dErr := primitives.CBCPKCS7Decrypt(key, encrypted)
+	if dErr == nil {
+		// Bug fix: the first format argument previously passed dErr (always
+		// nil on this path) where the plaintext was intended.
+		t.Fatalf("Expected an error decrypting ptext '%s'. Decrypted to '%v'", ptext, decrypted)
+	}
+}
+
+// TestCBCPKCS7EncryptCBCDecrypt_ExpectingCorruptMessage verifies that CBCDecrypt can decrypt the unpadded
+// version of the ciphertext, of a message of BlockSize length.
+func TestCBCPKCS7EncryptCBCDecrypt_ExpectingCorruptMessage(t *testing.T) {
+
+	// One of the purposes of this test is to also document and clarify the expected behavior, i.e., that an extra
+	// block is appended to the message at the padding stage, as per the spec of PKCS#7 v1.5 [see RFC-2315 p.21]
+	key := make([]byte, primitives.AESKeyLength)
+	rand.Reader.Read(key)
+
+	// 0123456789ABCDEF
+	var ptext = []byte("a 16 byte messag")
+
+	encrypted, encErr := primitives.CBCPKCS7Encrypt(key, ptext)
+	if encErr != nil {
+		t.Fatalf("Error encrypting ptext %v", encErr)
+	}
+
+	decrypted, dErr := primitives.CBCDecrypt(key, encrypted)
+	if dErr != nil {
+		// Bug fix: this failure message previously said "encrypting".
+		t.Fatalf("Error decrypting ptext %v", dErr)
+	}
+
+	// The first block must round-trip; the rest is the (un-removed) padding.
+	if string(ptext[:]) != string(decrypted[:aes.BlockSize]) {
+		t.Log("ptext: ", ptext)
+		t.Log("decrypted: ", decrypted[:aes.BlockSize])
+		t.Fatal("Encryption->Decryption with same key should result in original ptext")
+	}
+
+	if !bytes.Equal(decrypted[aes.BlockSize:], bytes.Repeat([]byte{byte(aes.BlockSize)}, aes.BlockSize)) {
+		t.Fatal("Expected extra block with padding in encrypted ptext", decrypted)
+	}
+
+}
+
+// TestCBCPKCS7Encrypt_EmptyPlaintext encrypts and pad an empty ptext. Verifying as well that the ciphertext length is as expected.
+func TestCBCPKCS7Encrypt_EmptyPlaintext(t *testing.T) {
+
+	key := make([]byte, primitives.AESKeyLength)
+	rand.Reader.Read(key)
+
+	t.Log("Generated key: ", key)
+
+	var emptyPlaintext = []byte("")
+	t.Log("Plaintext length: ", len(emptyPlaintext))
+
+	ciphertext, encErr := primitives.CBCPKCS7Encrypt(key, emptyPlaintext)
+	if encErr != nil {
+		t.Fatalf("Error encrypting '%v'", encErr)
+	}
+
+	// Expected ciphertext length: 2 * aes.BlockSize (= 32)
+	// As part of the padding, at least one block gets encrypted (while the first block is the IV)
+	const expectedLength = aes.BlockSize + aes.BlockSize
+	if len(ciphertext) != expectedLength {
+		// Bug fix: "recieved" typo in the failure message.
+		t.Fatalf("Wrong ciphertext length. Expected %d, received %d", expectedLength, len(ciphertext))
+	}
+
+	t.Log("Ciphertext length: ", len(ciphertext))
+	t.Log("Cipher: ", ciphertext)
+}
+
+// TestCBCEncrypt_EmptyPlaintext encrypts an empty message. Verifying as well that the ciphertext length is as expected.
+func TestCBCEncrypt_EmptyPlaintext(t *testing.T) {
+
+	key := make([]byte, primitives.AESKeyLength)
+	rand.Reader.Read(key)
+	t.Log("Generated key: ", key)
+
+	var emptyPlaintext = []byte("")
+	t.Log("Message length: ", len(emptyPlaintext))
+
+	ciphertext, encErr := primitives.CBCEncrypt(key, emptyPlaintext)
+	if encErr != nil {
+		// Bug fix: this error branch was previously empty, silently
+		// swallowing any encryption failure.
+		t.Fatalf("Error encrypting empty plaintext: %v", encErr)
+	}
+
+	t.Log("Ciphertext length: ", len(ciphertext))
+
+	// Expected cipher length: aes.BlockSize, the first and only block is the IV
+	var expectedLength = aes.BlockSize
+
+	if len(ciphertext) != expectedLength {
+		t.Fatalf("Wrong ciphertext length. Expected: '%d', received: '%d'", expectedLength, len(ciphertext))
+	}
+	t.Log("Ciphertext: ", ciphertext)
+}
+
+// TestCBCPKCS7Encrypt_VerifyRandomIVs encrypts twice with same key. The first 16 bytes should be different if IV is generated randomly.
+func TestCBCPKCS7Encrypt_VerifyRandomIVs(t *testing.T) {
+
+	key := make([]byte, aes.BlockSize)
+	rand.Reader.Read(key)
+	t.Log("Key 1", key)
+
+	var ptext = []byte("a message to encrypt")
+
+	ciphertext1, err := primitives.CBCPKCS7Encrypt(key, ptext)
+	if err != nil {
+		t.Fatalf("Error encrypting '%s': %s", ptext, err)
+	}
+
+	// Expecting a different IV if same message is encrypted with same key
+	ciphertext2, err := primitives.CBCPKCS7Encrypt(key, ptext)
+	if err != nil {
+		t.Fatalf("Error encrypting '%s': %s", ptext, err)
+	}
+
+	// The IV is the first block of each ciphertext; two encryptions of the
+	// same message under the same key must differ there.
+	iv1 := ciphertext1[:aes.BlockSize]
+	iv2 := ciphertext2[:aes.BlockSize]
+
+	t.Log("Ciphertext1: ", iv1)
+	t.Log("Ciphertext2: ", iv2)
+	t.Log("bytes.Equal: ", bytes.Equal(iv1, iv2))
+
+	if bytes.Equal(iv1, iv2) {
+		t.Fatal("Error: ciphertexts contain identical initialization vectors (IVs)")
+	}
+}
+
+// TestCBCPKCS7Encrypt_CorrectCiphertextLengthCheck verifies that the returned ciphertext lengths are as expected.
+// For every plaintext shorter than one block, the ciphertext must be exactly
+// two blocks: one block of IV plus one padded block of message.
+func TestCBCPKCS7Encrypt_CorrectCiphertextLengthCheck(t *testing.T) {
+
+	key := make([]byte, aes.BlockSize)
+	rand.Reader.Read(key)
+
+	// length of message (in bytes) < aes.BlockSize (16 bytes)
+	// The expected cipher length = IV length (1 block) + 1 block message
+
+	var ptext = []byte("0123456789ABCDEF")
+
+	for i := 1; i < aes.BlockSize; i++ {
+		ciphertext, err := primitives.CBCPKCS7Encrypt(key, ptext[:i])
+		if err != nil {
+			t.Fatal("Error encrypting '", ptext, "'")
+		}
+
+		expectedLength := aes.BlockSize + aes.BlockSize
+		if len(ciphertext) != expectedLength {
+			// Bug fix: message previously read "Incorrect ciphertext incorrect".
+			t.Fatalf("Incorrect ciphertext length: expected '%d', received '%d'", expectedLength, len(ciphertext))
+		}
+	}
+}
+
+// TestCBCEncryptCBCDecrypt_KeyMismatch attempts to decrypt with a different key than the one used for encryption.
+// CBC decryption with the wrong key does not fail; it simply yields garbage,
+// so the check is that the output differs from the original plaintext.
+func TestCBCEncryptCBCDecrypt_KeyMismatch(t *testing.T) {
+
+	// Generate a random key
+	key := make([]byte, aes.BlockSize)
+	rand.Reader.Read(key)
+
+	// Clone & tamper with the key (flip only the first byte)
+	wrongKey := make([]byte, aes.BlockSize)
+	copy(wrongKey, key[:])
+	wrongKey[0] = key[0] + 1
+
+	var ptext = []byte("1234567890ABCDEF")
+	encrypted, encErr := primitives.CBCEncrypt(key, ptext)
+	if encErr != nil {
+		t.Fatalf("Error encrypting '%s': %v", ptext, encErr)
+	}
+
+	decrypted, decErr := primitives.CBCDecrypt(wrongKey, encrypted)
+	if decErr != nil {
+		t.Fatalf("Error decrypting '%s': %v", ptext, decErr)
+	}
+
+	if string(ptext[:]) == string(decrypted[:]) {
+		t.Fatal("Decrypting a ciphertext with a different key than the one used for encrypting it - should not result in the original plaintext.")
+	}
+}
+
+// TestCBCEncryptCBCDecrypt encrypts with CBCEncrypt and decrypt with CBCDecrypt.
+// Uses an exactly-one-block plaintext so no padding is involved.
+func TestCBCEncryptCBCDecrypt(t *testing.T) {
+
+	key := make([]byte, primitives.AESKeyLength)
+	rand.Reader.Read(key)
+
+	// 1234567890123456
+	var ptext = []byte("a 16 byte messag")
+
+	encrypted, encErr := primitives.CBCEncrypt(key, ptext)
+	if encErr != nil {
+		t.Fatalf("Error encrypting '%s': %v", ptext, encErr)
+	}
+
+	decrypted, decErr := primitives.CBCDecrypt(key, encrypted)
+	if decErr != nil {
+		t.Fatalf("Error decrypting '%s': %v", ptext, decErr)
+	}
+
+	if string(ptext[:]) != string(decrypted[:]) {
+		t.Fatal("Encryption->Decryption with same key should result in the original plaintext.")
+	}
+}
+
+// TestAESRelatedUtilFunctions tests various functions commonly used in fabric wrt AES.
+// Round-trips 99 random messages of random length (1..1024 bytes) through
+// CBCPKCS7Encrypt / CBCPKCS7Decrypt under a single generated key.
+func TestAESRelatedUtilFunctions(t *testing.T) {
+
+	key, err := primitives.GenAESKey()
+	if err != nil {
+		t.Fatalf("Failed generating AES key [%s]", err)
+	}
+
+	for i := 1; i < 100; i++ {
+		// Idiom fix: renamed 'len' to 'length' — the original shadowed the
+		// builtin len() for the rest of the loop body.
+		length, err := rand.Int(rand.Reader, big.NewInt(1024))
+		if err != nil {
+			// Bug fix: diagnostics below were copy-pasted from the key
+			// generation above and misreported the failing step.
+			t.Fatalf("Failed generating random message length [%s]", err)
+		}
+		msg, err := primitives.GetRandomBytes(int(length.Int64()) + 1)
+		if err != nil {
+			t.Fatalf("Failed generating random message [%s]", err)
+		}
+
+		ct, err := primitives.CBCPKCS7Encrypt(key, msg)
+		if err != nil {
+			t.Fatalf("Failed encrypting [%s]", err)
+		}
+
+		msg2, err := primitives.CBCPKCS7Decrypt(key, ct)
+		if err != nil {
+			t.Fatalf("Failed decrypting [%s]", err)
+		}
+
+		if !bytes.Equal(msg, msg2) {
+			t.Fatalf("Wrong decryption output [%x][%x]", msg, msg2)
+		}
+
+	}
+
+}
+
+// TestVariousAESKeyEncoding tests some AES <-> PEM conversions:
+// plain PEM round-trip, then password-encrypted PEM round-trip.
+func TestVariousAESKeyEncoding(t *testing.T) {
+	key, err := primitives.GenAESKey()
+	if err != nil {
+		t.Fatalf("Failed generating AES key [%s]", err)
+	}
+
+	// PEM format (unencrypted)
+	pem := primitives.AEStoPEM(key)
+	keyFromPEM, err := primitives.PEMtoAES(pem, nil)
+	if err != nil {
+		t.Fatalf("Failed converting PEM to AES key [%s]", err)
+	}
+	if 0 != bytes.Compare(key, keyFromPEM) {
+		t.Fatalf("Failed converting PEM to AES key. Keys are different [%x][%x]", key, keyFromPEM)
+	}
+
+	// Encrypted PEM format (password-protected)
+	pem, err = primitives.AEStoEncryptedPEM(key, []byte("passwd"))
+	if err != nil {
+		t.Fatalf("Failed converting AES key to Encrypted PEM [%s]", err)
+	}
+	keyFromPEM, err = primitives.PEMtoAES(pem, []byte("passwd"))
+	if err != nil {
+		t.Fatalf("Failed converting encrypted PEM to AES key [%s]", err)
+	}
+	if 0 != bytes.Compare(key, keyFromPEM) {
+		t.Fatalf("Failed converting encrypted PEM to AES key. Keys are different [%x][%x]", key, keyFromPEM)
+	}
+}
diff --git a/core/crypto/primitives/crypto.go b/core/crypto/primitives/crypto.go
new file mode 100644
index 00000000000..adb33685415
--- /dev/null
+++ b/core/crypto/primitives/crypto.go
@@ -0,0 +1,188 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package primitives
+
+import (
+ "errors"
+ "io"
+)
+
+// Sentinel errors returned by the primitives package.
+// NOTE(review): these error strings are capitalized and end with a period,
+// contrary to Go convention (golint: error strings should not be capitalized
+// or end with punctuation). Left unchanged because callers may match on them.
+var (
+	// ErrEncryption Error during encryption
+	ErrEncryption = errors.New("Error during encryption.")
+
+	// ErrDecryption Error during decryption
+	ErrDecryption = errors.New("Error during decryption.")
+
+	// ErrInvalidSecretKeyType Invalid Secret Key type
+	ErrInvalidSecretKeyType = errors.New("Invalid Secret Key type.")
+
+	// ErrInvalidPublicKeyType Invalid Public Key type
+	ErrInvalidPublicKeyType = errors.New("Invalid Public Key type.")
+
+	// ErrInvalidKeyParameter Invalid Key Parameter
+	ErrInvalidKeyParameter = errors.New("Invalid Key Parameter.")
+
+	// ErrInvalidNilKeyParameter Invalid Nil Key Parameter
+	ErrInvalidNilKeyParameter = errors.New("Invalid Nil Key Parameter.")
+
+	// ErrInvalidKeyGeneratorParameter Invalid Key Generator Parameter
+	ErrInvalidKeyGeneratorParameter = errors.New("Invalid Key Generator Parameter.")
+)
+
+// Parameters is the common interface for all the parameters
+type Parameters interface {
+
+	// GetRand returns the random generator associated to these parameters
+	GetRand() io.Reader
+}
+
+// CipherParameters is the common marker interface to represent cipher parameters
+type CipherParameters interface {
+	Parameters
+}
+
+// AsymmetricCipherParameters is the common interface to represent asymmetric cipher parameters
+type AsymmetricCipherParameters interface {
+	CipherParameters
+
+	// IsPublic returns true if the parameters are public, false otherwise.
+	IsPublic() bool
+}
+
+// PublicKey is the common marker interface to represent public asymmetric cipher parameters
+type PublicKey interface {
+	AsymmetricCipherParameters
+}
+
+// PrivateKey is the common interface to represent private asymmetric cipher parameters
+type PrivateKey interface {
+	AsymmetricCipherParameters
+
+	// GetPublicKey returns the associated public key
+	GetPublicKey() PublicKey
+}
+
+// KeyGeneratorParameters is the common marker interface to represent key generation parameters
+type KeyGeneratorParameters interface {
+	Parameters
+}
+
+// KeyGenerator defines a key generator
+type KeyGenerator interface {
+	// Init initializes this generator using the passed parameters
+	Init(params KeyGeneratorParameters) error
+
+	// GenerateKey generates a new private key
+	GenerateKey() (PrivateKey, error)
+}
+
+// AsymmetricCipher defines an asymmetric cipher
+type AsymmetricCipher interface {
+	// Init initializes this cipher with the passed parameters.
+	// Whether Process encrypts or decrypts depends on whether the
+	// parameters are public (encrypt) or private (decrypt).
+	Init(params AsymmetricCipherParameters) error
+
+	// Process processes the byte array given in input
+	Process(msg []byte) ([]byte, error)
+}
+
+// SecretKey defines a symmetric key (marker interface)
+type SecretKey interface {
+	CipherParameters
+}
+
+// StreamCipher defines a stream cipher
+type StreamCipher interface {
+	// Init initializes this cipher with the passed parameters;
+	// forEncryption selects the direction of Process.
+	Init(forEncryption bool, params CipherParameters) error
+
+	// Process processes the byte array given in input
+	Process(msg []byte) ([]byte, error)
+}
+
+// KeySerializer defines a key serializer/deserializer
+type KeySerializer interface {
+	// ToBytes converts a key to bytes
+	ToBytes(key interface{}) ([]byte, error)
+
+	// FromBytes converts bytes to a key
+	FromBytes([]byte) (interface{}, error)
+}
+
+// AsymmetricCipherSPI is a Service Provider Interface for AsymmetricCipher
+type AsymmetricCipherSPI interface {
+
+	// NewAsymmetricCipherFromPrivateKey creates a new AsymmetricCipher for decryption from a private key
+	NewAsymmetricCipherFromPrivateKey(priv PrivateKey) (AsymmetricCipher, error)
+
+	// NewAsymmetricCipherFromPublicKey creates a new AsymmetricCipher for encryption from a public key
+	NewAsymmetricCipherFromPublicKey(pub PublicKey) (AsymmetricCipher, error)
+
+	// NewAsymmetricCipherFromSerializedPublicKey creates a new AsymmetricCipher for encryption from a serialized public key
+	NewAsymmetricCipherFromSerializedPublicKey(pub []byte) (AsymmetricCipher, error)
+
+	// NewAsymmetricCipherFromSerializedPrivateKey creates a new AsymmetricCipher for decryption from a serialized private key
+	NewAsymmetricCipherFromSerializedPrivateKey(priv []byte) (AsymmetricCipher, error)
+
+	// NewDefaultPrivateKey creates a new private key from rand and default parameters
+	NewDefaultPrivateKey(rand io.Reader) (PrivateKey, error)
+
+	// NewPrivateKey creates a new private key from (rand, params)
+	NewPrivateKey(rand io.Reader, params interface{}) (PrivateKey, error)
+
+	// NewPublicKey creates a new public key from (rand, params)
+	NewPublicKey(rand io.Reader, params interface{}) (PublicKey, error)
+
+	// SerializePrivateKey serializes a private key
+	SerializePrivateKey(priv PrivateKey) ([]byte, error)
+
+	// DeserializePrivateKey deserializes to a private key
+	DeserializePrivateKey(bytes []byte) (PrivateKey, error)
+
+	// SerializePublicKey serializes a public key
+	SerializePublicKey(pub PublicKey) ([]byte, error)
+
+	// DeserializePublicKey deserializes to a public key
+	DeserializePublicKey(bytes []byte) (PublicKey, error)
+}
+
+// StreamCipherSPI is a Service Provider Interface for StreamCipher
+type StreamCipherSPI interface {
+	// GenerateKey generates a new secret key
+	GenerateKey() (SecretKey, error)
+
+	// GenerateKeyAndSerialize generates a new secret key and returns it
+	// together with its serialized form
+	GenerateKeyAndSerialize() (SecretKey, []byte, error)
+
+	// NewSecretKey creates a new secret key from (rand, params)
+	NewSecretKey(rand io.Reader, params interface{}) (SecretKey, error)
+
+	// NewStreamCipherForEncryptionFromKey creates a new StreamCipher for encryption from a secret key
+	NewStreamCipherForEncryptionFromKey(secret SecretKey) (StreamCipher, error)
+
+	// NewStreamCipherForEncryptionFromSerializedKey creates a new StreamCipher for encryption from a serialized key
+	NewStreamCipherForEncryptionFromSerializedKey(secret []byte) (StreamCipher, error)
+
+	// NewStreamCipherForDecryptionFromKey creates a new StreamCipher for decryption from a secret key
+	NewStreamCipherForDecryptionFromKey(secret SecretKey) (StreamCipher, error)
+
+	// NewStreamCipherForDecryptionFromSerializedKey creates a new StreamCipher for decryption from a serialized key
+	NewStreamCipherForDecryptionFromSerializedKey(secret []byte) (StreamCipher, error)
+
+	// SerializeSecretKey serializes a secret key
+	SerializeSecretKey(secret SecretKey) ([]byte, error)
+
+	// DeserializeSecretKey deserializes to a secret key
+	DeserializeSecretKey(bytes []byte) (SecretKey, error)
+}
diff --git a/core/crypto/primitives/ecdsa.go b/core/crypto/primitives/ecdsa.go
new file mode 100644
index 00000000000..0550cea94d4
--- /dev/null
+++ b/core/crypto/primitives/ecdsa.go
@@ -0,0 +1,115 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package primitives
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "encoding/asn1"
+ "math/big"
+)
+
+// ECDSASignature represents an ECDSA signature as the (R, S) pair;
+// this struct is what gets ASN.1 (un)marshalled by ECDSASign/ECDSAVerify.
+type ECDSASignature struct {
+	R, S *big.Int
+}
+
+// NewECDSAKey generates a new ECDSA key on the package default curve
+// using crypto/rand as the entropy source.
+func NewECDSAKey() (*ecdsa.PrivateKey, error) {
+	return ecdsa.GenerateKey(GetDefaultCurve(), rand.Reader)
+}
+
+// ECDSASignDirect signs the hash of msg and returns the raw (r, s) pair
+// without any encoding.
+// NOTE(review): the unchecked type assertion panics if signKey is not an
+// *ecdsa.PrivateKey — consider a checked assertion returning an error.
+func ECDSASignDirect(signKey interface{}, msg []byte) (*big.Int, *big.Int, error) {
+	temp := signKey.(*ecdsa.PrivateKey)
+	h := Hash(msg)
+	r, s, err := ecdsa.Sign(rand.Reader, temp, h)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return r, s, nil
+}
+
+// ECDSASign signs the hash of msg with signKey (must be an *ecdsa.PrivateKey,
+// otherwise the type assertion panics) and returns the signature as an
+// ASN.1-encoded ECDSASignature{R, S}.
+func ECDSASign(signKey interface{}, msg []byte) ([]byte, error) {
+	temp := signKey.(*ecdsa.PrivateKey)
+	h := Hash(msg)
+	r, s, err := ecdsa.Sign(rand.Reader, temp, h)
+	if err != nil {
+		return nil, err
+	}
+
+	// Encode (r, s) so the signature travels as a single byte slice.
+	raw, err := asn1.Marshal(ECDSASignature{r, s})
+	if err != nil {
+		return nil, err
+	}
+
+	return raw, nil
+}
+
+// ECDSAVerify verifies an ASN.1-encoded ECDSA signature over the hash of msg
+// against verKey (must be an *ecdsa.PublicKey, otherwise the type assertion
+// panics). It returns (valid, error); a malformed signature yields the
+// unmarshalling error.
+func ECDSAVerify(verKey interface{}, msg, signature []byte) (bool, error) {
+	ecdsaSignature := new(ECDSASignature)
+	_, err := asn1.Unmarshal(signature, ecdsaSignature)
+	if err != nil {
+		// Bug fix: the error was previously discarded (returned as nil),
+		// making a malformed signature indistinguishable from a signature
+		// that merely fails verification.
+		return false, err
+	}
+
+	temp := verKey.(*ecdsa.PublicKey)
+	h := Hash(msg)
+	return ecdsa.Verify(temp, h, ecdsaSignature.R, ecdsaSignature.S), nil
+}
+
+// VerifySignCapability tests signing capabilities.
+// NOTE(review): the implementation is currently disabled (see the TODO
+// below) and this function unconditionally returns nil, so it performs
+// no verification at all.
+func VerifySignCapability(tempSK interface{}, certPK interface{}) error {
+	/* TODO: reactivate or remove
+	msg := []byte("This is a message to be signed and verified by ECDSA!")
+
+	sigma, err := ECDSASign(tempSK, msg)
+	if err != nil {
+		// log.Errorf("Error signing [%s].", err.Error())
+
+		return err
+	}
+
+	ok, err := ECDSAVerify(certPK, msg, sigma)
+	if err != nil {
+		// log.Errorf("Error verifying [%s].", err.Error())
+
+		return err
+	}
+
+	if !ok {
+		// log.Errorf("Signature not valid.")
+
+		return errors.New("Signature not valid.")
+	}
+
+	// log.Infof("Verifing signature capability...done")
+	*/
+	return nil
+}
diff --git a/core/crypto/primitives/ecies/engine.go b/core/crypto/primitives/ecies/engine.go
new file mode 100644
index 00000000000..8f0dd09972d
--- /dev/null
+++ b/core/crypto/primitives/ecies/engine.go
@@ -0,0 +1,218 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ecies
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/hmac"
+ "crypto/rand"
+ "errors"
+ "io"
+
+ "crypto/subtle"
+ "fmt"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "golang.org/x/crypto/hkdf"
+)
+
+// aesEncrypt encrypts plain with AES in CFB mode, prepending a freshly
+// generated random IV to the returned ciphertext.
+// NOTE(review): the output is not authenticated on its own; in this package
+// eciesEncrypt MACs it with HMAC afterwards.
+func aesEncrypt(key, plain []byte) ([]byte, error) {
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+
+	// Layout: [IV (one block)][CFB keystream XOR plain]
+	text := make([]byte, aes.BlockSize+len(plain))
+	iv := text[:aes.BlockSize]
+	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
+		return nil, err
+	}
+
+	cfb := cipher.NewCFBEncrypter(block, iv)
+	cfb.XORKeyStream(text[aes.BlockSize:], plain)
+
+	return text, nil
+}
+
+// aesDecrypt decrypts text produced by aesEncrypt: the first block is the IV,
+// the remainder is CFB-encrypted data. It does NOT authenticate the input;
+// callers must verify a MAC first (eciesDecrypt does).
+func aesDecrypt(key, text []byte) ([]byte, error) {
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(text) < aes.BlockSize {
+		return nil, errors.New("cipher text too short")
+	}
+
+	cfb := cipher.NewCFBDecrypter(block, text[:aes.BlockSize])
+	plain := make([]byte, len(text)-aes.BlockSize)
+	cfb.XORKeyStream(plain, text[aes.BlockSize:])
+
+	return plain, nil
+}
+
+// eciesGenerateKey generates a fresh EC key pair on the given curve.
+// The params argument is currently unused.
+func eciesGenerateKey(rand io.Reader, curve elliptic.Curve, params *Params) (*ecdsa.PrivateKey, error) {
+	return ecdsa.GenerateKey(curve, rand)
+}
+
+// eciesEncrypt performs ECIES encryption of plain under the recipient public
+// key pub: an ephemeral EC key pair is generated, a shared secret is derived
+// via ECDH, HKDF expands it into an encryption key kE and a MAC key kM, the
+// message is AES-CFB encrypted under kE and HMAC-tagged under kM.
+// s1 and s2 are the optional shared-info strings fed to the KDF and the MAC
+// respectively. Output layout: R (ephemeral PK, uncompressed) || EM || D.
+func eciesEncrypt(rand io.Reader, pub *ecdsa.PublicKey, s1, s2 []byte, plain []byte) ([]byte, error) {
+	params := pub.Curve
+
+	// Select an ephemeral elliptic curve key pair associated with
+	// elliptic curve domain parameters params
+	priv, Rx, Ry, err := elliptic.GenerateKey(pub.Curve, rand)
+	if err != nil {
+		// Bug fix: this error was previously never checked; a failed
+		// key generation would have silently produced garbage output.
+		return nil, err
+	}
+
+	// Convert R=(Rx,Ry) to an octet string R bar
+	// This is uncompressed
+	Rb := elliptic.Marshal(pub.Curve, Rx, Ry)
+
+	// Derive a shared secret field element z from the ephemeral secret key k
+	// and convert z to an octet string Z
+	z, _ := params.ScalarMult(pub.X, pub.Y, priv)
+	Z := z.Bytes()
+
+	// generate keying data K of length ecnKeyLen + macKeyLen octects from Z
+	// and s1
+	kE := make([]byte, 32)
+	kM := make([]byte, 32)
+	hkdf := hkdf.New(primitives.GetDefaultHash(), Z, s1, nil)
+	_, err = hkdf.Read(kE)
+	if err != nil {
+		return nil, err
+	}
+	_, err = hkdf.Read(kM)
+	if err != nil {
+		return nil, err
+	}
+
+	// Use the encryption operation of the symmetric encryption scheme
+	// to encrypt m under EK as ciphertext EM
+	EM, err := aesEncrypt(kE, plain)
+	if err != nil {
+		// Bug fix: this error was previously never checked.
+		return nil, err
+	}
+
+	// Use the tagging operation of the MAC scheme to compute
+	// the tag D on EM || s2
+	mac := hmac.New(primitives.GetDefaultHash(), kM)
+	mac.Write(EM)
+	if len(s2) > 0 {
+		mac.Write(s2)
+	}
+	D := mac.Sum(nil)
+
+	// Output R || EM || D
+	ciphertext := make([]byte, len(Rb)+len(EM)+len(D))
+	copy(ciphertext, Rb)
+	copy(ciphertext[len(Rb):], EM)
+	copy(ciphertext[len(Rb)+len(EM):], D)
+
+	return ciphertext, nil
+}
+
+// eciesDecrypt reverses eciesEncrypt: it parses the ephemeral public key R
+// from the head of ciphertext (compressed or uncompressed point encoding),
+// re-derives the shared secret via ECDH, expands kE/kM with HKDF, verifies
+// the HMAC tag D in constant time, and only then decrypts EM.
+// s1 and s2 must match the values used at encryption time.
+func eciesDecrypt(priv *ecdsa.PrivateKey, s1, s2 []byte, ciphertext []byte) ([]byte, error) {
+	params := priv.Curve
+
+	var (
+		rLen   int
+		hLen   = primitives.GetDefaultHash()().Size()
+		mStart int
+		mEnd   int
+	)
+
+	// Robustness fix: guard before indexing ciphertext[0] — an empty
+	// input previously caused a panic.
+	if len(ciphertext) == 0 {
+		return nil, errors.New("Invalid ciphertext. Empty.")
+	}
+
+	// The first byte of an encoded EC point identifies its form:
+	// 2/3 = compressed, 4 = uncompressed.
+	switch ciphertext[0] {
+	case 2, 3:
+		rLen = ((priv.PublicKey.Curve.Params().BitSize + 7) / 8) + 1
+		if len(ciphertext) < (rLen + hLen + 1) {
+			return nil, fmt.Errorf("Invalid ciphertext len [First byte = %d]", ciphertext[0])
+		}
+	case 4:
+		rLen = 2*((priv.PublicKey.Curve.Params().BitSize+7)/8) + 1
+		if len(ciphertext) < (rLen + hLen + 1) {
+			return nil, fmt.Errorf("Invalid ciphertext len [First byte = %d]", ciphertext[0])
+		}
+	default:
+		return nil, fmt.Errorf("Invalid ciphertext. Invalid first byte. [%d]", ciphertext[0])
+	}
+
+	// Layout: [R : rLen][EM : mStart..mEnd][D : hLen]
+	mStart = rLen
+	mEnd = len(ciphertext) - hLen
+
+	Rx, Ry := elliptic.Unmarshal(priv.Curve, ciphertext[:rLen])
+	if Rx == nil {
+		return nil, errors.New("Invalid ephemeral PK")
+	}
+	if !priv.Curve.IsOnCurve(Rx, Ry) {
+		return nil, errors.New("Invalid point on curve")
+	}
+
+	// Derive a shared secret field element z from the ephemeral secret key k
+	// and convert z to an octet string Z
+	z, _ := params.ScalarMult(Rx, Ry, priv.D.Bytes())
+	Z := z.Bytes()
+
+	// generate keying data K of length ecnKeyLen + macKeyLen octects from Z
+	// and s1
+	kE := make([]byte, 32)
+	kM := make([]byte, 32)
+	hkdf := hkdf.New(primitives.GetDefaultHash(), Z, s1, nil)
+	_, err := hkdf.Read(kE)
+	if err != nil {
+		return nil, err
+	}
+	_, err = hkdf.Read(kM)
+	if err != nil {
+		return nil, err
+	}
+
+	// Use the tagging operation of the MAC scheme to compute
+	// the tag D on EM || s2 and then compare (constant time, to avoid
+	// leaking how many tag bytes matched)
+	mac := hmac.New(primitives.GetDefaultHash(), kM)
+	mac.Write(ciphertext[mStart:mEnd])
+	if len(s2) > 0 {
+		mac.Write(s2)
+	}
+	D := mac.Sum(nil)
+
+	if subtle.ConstantTimeCompare(ciphertext[mEnd:], D) != 1 {
+		return nil, errors.New("Tag check failed")
+	}
+
+	// Use the decryption operation of the symmetric encryption scheme
+	// to decrypt EM under EK as plaintext
+	plaintext, err := aesDecrypt(kE, ciphertext[mStart:mEnd])
+
+	return plaintext, err
+}
diff --git a/core/crypto/primitives/ecies/es.go b/core/crypto/primitives/ecies/es.go
new file mode 100644
index 00000000000..93c26ec6e06
--- /dev/null
+++ b/core/crypto/primitives/ecies/es.go
@@ -0,0 +1,71 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ecies
+
+import (
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+)
+
+type encryptionSchemeImpl struct {
+ isForEncryption bool
+
+ // Parameters
+ params primitives.AsymmetricCipherParameters
+ pub *publicKeyImpl
+ priv *secretKeyImpl
+}
+
+func (es *encryptionSchemeImpl) Init(params primitives.AsymmetricCipherParameters) error {
+ if params == nil {
+ return primitives.ErrInvalidNilKeyParameter
+ }
+ es.isForEncryption = params.IsPublic()
+ es.params = params
+
+ if es.isForEncryption {
+ switch pk := params.(type) {
+ case *publicKeyImpl:
+ es.pub = pk
+ default:
+ return primitives.ErrInvalidPublicKeyType
+ }
+ } else {
+ switch sk := params.(type) {
+ case *secretKeyImpl:
+ es.priv = sk
+ default:
+ return primitives.ErrInvalidKeyParameter
+ }
+ }
+
+ return nil
+}
+
+func (es *encryptionSchemeImpl) Process(msg []byte) ([]byte, error) {
+ if len(msg) == 0 {
+ return nil, utils.ErrNilArgument
+ }
+
+ if es.isForEncryption {
+ // Encrypt
+ return eciesEncrypt(es.params.GetRand(), es.pub.pub, nil, nil, msg)
+ }
+
+ // Decrypt
+ return eciesDecrypt(es.priv.priv, nil, nil, msg)
+}
diff --git a/core/crypto/primitives/ecies/kg.go b/core/crypto/primitives/ecies/kg.go
new file mode 100644
index 00000000000..4fef5616b3a
--- /dev/null
+++ b/core/crypto/primitives/ecies/kg.go
@@ -0,0 +1,71 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ecies
+
+import (
+ "crypto/elliptic"
+ "fmt"
+ "io"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+)
+
+type keyGeneratorParameterImpl struct {
+ rand io.Reader
+ curve elliptic.Curve
+ params *Params
+}
+
+type keyGeneratorImpl struct {
+ isForEncryption bool
+ params *keyGeneratorParameterImpl
+}
+
+func (kgp keyGeneratorParameterImpl) GetRand() io.Reader {
+ return kgp.rand
+}
+
+func (kg *keyGeneratorImpl) Init(params primitives.KeyGeneratorParameters) error {
+ if params == nil {
+ return primitives.ErrInvalidKeyGeneratorParameter
+ }
+ switch kgparams := params.(type) {
+ case *keyGeneratorParameterImpl:
+ kg.params = kgparams
+ default:
+ return primitives.ErrInvalidKeyGeneratorParameter
+ }
+
+ return nil
+}
+
+func (kg *keyGeneratorImpl) GenerateKey() (primitives.PrivateKey, error) {
+ if kg.params == nil {
+ return nil, fmt.Errorf("Key Generator not initliazed")
+ }
+
+ privKey, err := eciesGenerateKey(
+ kg.params.rand,
+ kg.params.curve,
+ kg.params.params,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return &secretKeyImpl{privKey, nil, kg.params.params, kg.params.rand}, nil
+}
diff --git a/core/crypto/primitives/ecies/params.go b/core/crypto/primitives/ecies/params.go
new file mode 100644
index 00000000000..de78fcef930
--- /dev/null
+++ b/core/crypto/primitives/ecies/params.go
@@ -0,0 +1,32 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ecies
+
+import (
+ "crypto"
+ "crypto/cipher"
+ "hash"
+)
+
+// Params ECIES parameters
+type Params struct {
+ Hash func() hash.Hash
+ hashAlgo crypto.Hash
+ Cipher func([]byte) (cipher.Block, error)
+ BlockSize int
+ KeyLen int
+}
diff --git a/core/crypto/primitives/ecies/pk.go b/core/crypto/primitives/ecies/pk.go
new file mode 100644
index 00000000000..a26d2f7b090
--- /dev/null
+++ b/core/crypto/primitives/ecies/pk.go
@@ -0,0 +1,65 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ecies
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/x509"
+ "io"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+)
+
+type publicKeyImpl struct {
+ pub *ecdsa.PublicKey
+ rand io.Reader
+ params *Params
+}
+
+func (pk *publicKeyImpl) GetRand() io.Reader {
+ return pk.rand
+}
+
+func (pk *publicKeyImpl) IsPublic() bool {
+ return true
+}
+
+type publicKeySerializerImpl struct{}
+
+func (pks *publicKeySerializerImpl) ToBytes(key interface{}) ([]byte, error) {
+ if key == nil {
+ return nil, primitives.ErrInvalidNilKeyParameter
+ }
+
+ switch pk := key.(type) {
+ case *publicKeyImpl:
+ return x509.MarshalPKIXPublicKey(pk.pub)
+ default:
+ return nil, primitives.ErrInvalidPublicKeyType
+ }
+}
+
+func (pks *publicKeySerializerImpl) FromBytes(bytes []byte) (interface{}, error) {
+ key, err := x509.ParsePKIXPublicKey(bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: add params here
+ return &publicKeyImpl{key.(*ecdsa.PublicKey), rand.Reader, nil}, nil
+}
diff --git a/core/crypto/primitives/ecies/sk.go b/core/crypto/primitives/ecies/sk.go
new file mode 100644
index 00000000000..409e3c24dc6
--- /dev/null
+++ b/core/crypto/primitives/ecies/sk.go
@@ -0,0 +1,73 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ecies
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/x509"
+ "io"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+)
+
+type secretKeyImpl struct {
+ priv *ecdsa.PrivateKey
+ pub primitives.PublicKey
+ params *Params
+ rand io.Reader
+}
+
+func (sk *secretKeyImpl) IsPublic() bool {
+ return false
+}
+
+func (sk *secretKeyImpl) GetRand() io.Reader {
+ return sk.rand
+}
+
+func (sk *secretKeyImpl) GetPublicKey() primitives.PublicKey {
+ if sk.pub == nil {
+ sk.pub = &publicKeyImpl{&sk.priv.PublicKey, sk.rand, sk.params}
+ }
+ return sk.pub
+}
+
+type secretKeySerializerImpl struct{}
+
+func (sks *secretKeySerializerImpl) ToBytes(key interface{}) ([]byte, error) {
+ if key == nil {
+ return nil, primitives.ErrInvalidNilKeyParameter
+ }
+
+ switch sk := key.(type) {
+ case *secretKeyImpl:
+ return x509.MarshalECPrivateKey(sk.priv)
+ default:
+ return nil, primitives.ErrInvalidKeyParameter
+ }
+}
+
+func (sks *secretKeySerializerImpl) FromBytes(bytes []byte) (interface{}, error) {
+ key, err := x509.ParseECPrivateKey(bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: add params here
+ return &secretKeyImpl{key, nil, nil, rand.Reader}, nil
+}
diff --git a/core/crypto/primitives/ecies/spi.go b/core/crypto/primitives/ecies/spi.go
new file mode 100644
index 00000000000..b991092a4d0
--- /dev/null
+++ b/core/crypto/primitives/ecies/spi.go
@@ -0,0 +1,258 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ecies
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "fmt"
+ "io"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+)
+
+func newKeyGeneratorParameter(r io.Reader, curve elliptic.Curve) (primitives.KeyGeneratorParameters, error) {
+ if r == nil {
+ r = rand.Reader
+ }
+ return &keyGeneratorParameterImpl{r, curve, nil}, nil
+}
+
+func newKeyGenerator() (primitives.KeyGenerator, error) {
+ return &keyGeneratorImpl{}, nil
+}
+
+func newKeyGeneratorFromCurve(r io.Reader, curve elliptic.Curve) (primitives.KeyGenerator, error) {
+ if r == nil {
+ r = rand.Reader
+ }
+ if curve == nil {
+ curve = primitives.GetDefaultCurve()
+ }
+
+ kg, err := newKeyGenerator()
+ if err != nil {
+ return nil, err
+ }
+
+ kgp, err := newKeyGeneratorParameter(r, curve)
+ if err != nil {
+ return nil, err
+ }
+
+ err = kg.Init(kgp)
+ if err != nil {
+ return nil, err
+ }
+
+ return kg, nil
+}
+
+func newPublicKeyFromECDSA(r io.Reader, pk *ecdsa.PublicKey) (primitives.PublicKey, error) {
+ if r == nil {
+ r = rand.Reader
+ }
+ if pk == nil {
+ return nil, fmt.Errorf("Null ECDSA public key")
+ }
+
+ return &publicKeyImpl{pk, r, nil}, nil
+}
+
+func newPrivateKeyFromECDSA(r io.Reader, sk *ecdsa.PrivateKey) (primitives.PrivateKey, error) {
+ if r == nil {
+ r = rand.Reader
+ }
+ if sk == nil {
+ return nil, fmt.Errorf("Null ECDSA secret key")
+ }
+
+ return &secretKeyImpl{sk, nil, nil, r}, nil
+}
+
+func serializePrivateKey(priv primitives.PrivateKey) ([]byte, error) {
+ if priv == nil {
+ return nil, fmt.Errorf("Null Private Key")
+ }
+
+ serializer := secretKeySerializerImpl{}
+ return serializer.ToBytes(priv)
+}
+
+func deserializePrivateKey(bytes []byte) (primitives.PrivateKey, error) {
+ if len(bytes) == 0 {
+ return nil, fmt.Errorf("Null bytes")
+ }
+
+ serializer := secretKeySerializerImpl{}
+ priv, err := serializer.FromBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return priv.(primitives.PrivateKey), nil
+}
+
+func serializePublicKey(pub primitives.PublicKey) ([]byte, error) {
+ if pub == nil {
+ return nil, fmt.Errorf("Null Public Key")
+ }
+
+ serializer := publicKeySerializerImpl{}
+ return serializer.ToBytes(pub)
+}
+
+func deserializePublicKey(bytes []byte) (primitives.PublicKey, error) {
+ if len(bytes) == 0 {
+ return nil, fmt.Errorf("Null bytes")
+ }
+
+ serializer := publicKeySerializerImpl{}
+ pub, err := serializer.FromBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return pub.(primitives.PublicKey), nil
+}
+
+func newAsymmetricCipher() (primitives.AsymmetricCipher, error) {
+ return &encryptionSchemeImpl{}, nil
+}
+
+func newPrivateKey(r io.Reader, curve elliptic.Curve) (primitives.PrivateKey, error) {
+ if r == nil {
+ r = rand.Reader
+ }
+ if curve == nil {
+ curve = primitives.GetDefaultCurve()
+ }
+ kg, err := newKeyGeneratorFromCurve(r, curve)
+ if err != nil {
+ return nil, err
+ }
+ return kg.GenerateKey()
+}
+
+func newAsymmetricCipherFromPrivateKey(priv primitives.PrivateKey) (primitives.AsymmetricCipher, error) {
+ if priv == nil {
+ return nil, fmt.Errorf("Null Private Key")
+ }
+
+ es, err := newAsymmetricCipher()
+ if err != nil {
+ return nil, err
+ }
+
+ err = es.Init(priv)
+ if err != nil {
+ return nil, err
+ }
+
+ return es, nil
+}
+
+func newAsymmetricCipherFromPublicKey(pub primitives.PublicKey) (primitives.AsymmetricCipher, error) {
+ if pub == nil {
+ return nil, fmt.Errorf("Null Public Key")
+ }
+
+ es, err := newAsymmetricCipher()
+ if err != nil {
+ return nil, err
+ }
+
+ err = es.Init(pub)
+ if err != nil {
+ return nil, err
+ }
+
+ return es, nil
+}
+
+// NewSPI returns a new SPI instance
+func NewSPI() primitives.AsymmetricCipherSPI {
+ return &spiImpl{}
+}
+
+type spiImpl struct {
+}
+
+func (spi *spiImpl) NewAsymmetricCipherFromPrivateKey(priv primitives.PrivateKey) (primitives.AsymmetricCipher, error) {
+ return newAsymmetricCipherFromPrivateKey(priv)
+}
+
+func (spi *spiImpl) NewAsymmetricCipherFromPublicKey(pub primitives.PublicKey) (primitives.AsymmetricCipher, error) {
+ return newAsymmetricCipherFromPublicKey(pub)
+}
+
+func (spi *spiImpl) NewAsymmetricCipherFromSerializedPublicKey(pub []byte) (primitives.AsymmetricCipher, error) {
+ pk, err := spi.DeserializePublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+ return newAsymmetricCipherFromPublicKey(pk)
+}
+
+func (spi *spiImpl) NewAsymmetricCipherFromSerializedPrivateKey(priv []byte) (primitives.AsymmetricCipher, error) {
+ sk, err := spi.DeserializePrivateKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ return newAsymmetricCipherFromPrivateKey(sk)
+}
+
+func (spi *spiImpl) NewPrivateKey(r io.Reader, params interface{}) (primitives.PrivateKey, error) {
+ switch t := params.(type) {
+ case *ecdsa.PrivateKey:
+ return newPrivateKeyFromECDSA(r, t)
+ case elliptic.Curve:
+ return newPrivateKey(r, t)
+ default:
+ return nil, primitives.ErrInvalidKeyGeneratorParameter
+ }
+}
+
+func (spi *spiImpl) NewDefaultPrivateKey(r io.Reader) (primitives.PrivateKey, error) {
+ return spi.NewPrivateKey(r, primitives.GetDefaultCurve())
+}
+
+func (spi *spiImpl) NewPublicKey(r io.Reader, params interface{}) (primitives.PublicKey, error) {
+ switch t := params.(type) {
+ case *ecdsa.PublicKey:
+ return newPublicKeyFromECDSA(r, t)
+ default:
+ return nil, primitives.ErrInvalidKeyGeneratorParameter
+ }
+}
+
+func (spi *spiImpl) SerializePrivateKey(priv primitives.PrivateKey) ([]byte, error) {
+ return serializePrivateKey(priv)
+}
+
+func (spi *spiImpl) DeserializePrivateKey(bytes []byte) (primitives.PrivateKey, error) {
+ return deserializePrivateKey(bytes)
+}
+
+func (spi *spiImpl) SerializePublicKey(priv primitives.PublicKey) ([]byte, error) {
+ return serializePublicKey(priv)
+}
+
+func (spi *spiImpl) DeserializePublicKey(bytes []byte) (primitives.PublicKey, error) {
+ return deserializePublicKey(bytes)
+}
diff --git a/core/crypto/primitives/ecies/spi_test.go b/core/crypto/primitives/ecies/spi_test.go
new file mode 100644
index 00000000000..593f0a395eb
--- /dev/null
+++ b/core/crypto/primitives/ecies/spi_test.go
@@ -0,0 +1,310 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ecies
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "fmt"
+ "os"
+ "reflect"
+ "testing"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+)
+
+type TestParameters struct {
+ hashFamily string
+ securityLevel int
+}
+
+func (t *TestParameters) String() string {
+ return t.hashFamily + "-" + string(t.securityLevel)
+}
+
+var testParametersSet = []*TestParameters{
+ &TestParameters{"SHA3", 256},
+ &TestParameters{"SHA3", 384},
+ &TestParameters{"SHA2", 256},
+ &TestParameters{"SHA2", 384}}
+
+func TestMain(m *testing.M) {
+ for _, params := range testParametersSet {
+ err := primitives.SetSecurityLevel(params.hashFamily, params.securityLevel)
+ if err == nil {
+ m.Run()
+ } else {
+ panic(fmt.Errorf("Failed initiliazing crypto layer at [%s]", params.String()))
+ }
+ }
+ os.Exit(0)
+}
+
+func TestSPINewDefaultPrivateKey(t *testing.T) {
+ spi := NewSPI()
+
+ if _, err := spi.NewDefaultPrivateKey(rand.Reader); err != nil {
+ t.Fatalf("Failed generating key [%s]", err)
+ }
+
+ if _, err := spi.NewDefaultPrivateKey(nil); err != nil {
+ t.Fatalf("Failed generating key [%s]", err)
+ }
+}
+
+func TestSPINewPrivateKeyFromCurve(t *testing.T) {
+ spi := NewSPI()
+
+ if _, err := spi.NewPrivateKey(rand.Reader, primitives.GetDefaultCurve()); err != nil {
+ t.Fatalf("Failed generating key [%s]", err)
+ }
+
+ if _, err := spi.NewPrivateKey(nil, primitives.GetDefaultCurve()); err != nil {
+ t.Fatalf("Failed generating key [%s]", err)
+ }
+
+ if _, err := spi.NewPrivateKey(nil, nil); err == nil {
+ t.Fatalf("Generating key should file with nil params.")
+ }
+}
+
+func TestSPINewPrivateKeyFromECDSAKey(t *testing.T) {
+ spi := NewSPI()
+
+ ecdsaKey, err := ecdsa.GenerateKey(primitives.GetDefaultCurve(), rand.Reader)
+ if err != nil {
+ t.Fatalf("Failed generating ECDSA key [%s]", err)
+ }
+
+ if _, err := spi.NewPrivateKey(rand.Reader, ecdsaKey); err != nil {
+ t.Fatalf("Failed generating key [%s]", err)
+ }
+
+ if _, err := spi.NewPrivateKey(nil, ecdsaKey); err != nil {
+ t.Fatalf("Failed generating key [%s]", err)
+ }
+}
+
+func TestSPINewPublicKeyFromECDSAKey(t *testing.T) {
+ spi := NewSPI()
+
+ ecdsaKey, err := ecdsa.GenerateKey(primitives.GetDefaultCurve(), rand.Reader)
+ if err != nil {
+ t.Fatalf("Failed generating ECDSA key [%s]", err)
+ }
+
+ if _, err := spi.NewPublicKey(rand.Reader, &ecdsaKey.PublicKey); err != nil {
+ t.Fatalf("Failed generating key [%s]", err)
+ }
+
+ if _, err := spi.NewPublicKey(nil, &ecdsaKey.PublicKey); err != nil {
+ t.Fatalf("Failed generating key [%s]", err)
+ }
+
+ if _, err := spi.NewPublicKey(nil, nil); err == nil {
+ t.Fatalf("Generating key should file with nil params.")
+ }
+}
+
+func TestSPINewAsymmetricCipherFrom(t *testing.T) {
+ spi := NewSPI()
+
+ key, err := spi.NewDefaultPrivateKey(nil)
+ if err != nil {
+ t.Fatalf("Failed generating key [%s]", err)
+ }
+
+ if _, err := spi.NewAsymmetricCipherFromPrivateKey(key); err != nil {
+ t.Fatalf("Failed creating AsymCipher from private key [%s]", err)
+ }
+
+ if _, err := spi.NewAsymmetricCipherFromPrivateKey(nil); err == nil {
+ t.Fatalf("Creating AsymCipher from private key shoud fail with nil key")
+ }
+
+ if _, err := spi.NewAsymmetricCipherFromPublicKey(key.GetPublicKey()); err != nil {
+ t.Fatalf("Failed creating AsymCipher from public key [%s]", err)
+ }
+
+ if _, err := spi.NewAsymmetricCipherFromPublicKey(nil); err == nil {
+ t.Fatalf("Creating AsymCipher from public key shoud fail with nil key")
+ }
+}
+
+func TestSPIEncryption(t *testing.T) {
+ spi := NewSPI()
+
+ key, err := spi.NewDefaultPrivateKey(nil)
+ if err != nil {
+ t.Fatalf("Failed generating key [%s]", err)
+ }
+
+ // Encrypt
+ aCipher, err := spi.NewAsymmetricCipherFromPublicKey(key.GetPublicKey())
+ if err != nil {
+ t.Fatalf("Failed creating AsymCipher from public key [%s]", err)
+ }
+ msg := []byte("Hello World")
+ ct, err := aCipher.Process(msg)
+ if err != nil {
+ t.Fatalf("Failed encrypting [%s]", err)
+ }
+
+ // Decrypt
+ aCipher, err = spi.NewAsymmetricCipherFromPublicKey(key)
+ if err != nil {
+ t.Fatalf("Failed creating AsymCipher from private key [%s]", err)
+ }
+ recoveredMsg, err := aCipher.Process(ct)
+ if err != nil {
+ t.Fatalf("Failed decrypting [%s]", err)
+ }
+ if !reflect.DeepEqual(msg, recoveredMsg) {
+ t.Fatalf("Failed decrypting. Output is different [%x][%x]", msg, recoveredMsg)
+ }
+}
+
+func TestSPIStressEncryption(t *testing.T) {
+ spi := NewSPI()
+
+ key, err := spi.NewDefaultPrivateKey(nil)
+ if err != nil {
+ t.Fatalf("Failed generating key [%s]", err)
+ }
+
+ // Encrypt
+ aCipher, err := spi.NewAsymmetricCipherFromPublicKey(key.GetPublicKey())
+ if err != nil {
+ t.Fatalf("Failed creating AsymCipher from public key [%s]", err)
+ }
+ _, err = aCipher.Process(nil)
+ if err == nil {
+ t.Fatalf("Encrypting nil should fail")
+ }
+
+}
+
+func TestSPIStressDecryption(t *testing.T) {
+ spi := NewSPI()
+
+ key, err := spi.NewDefaultPrivateKey(nil)
+ if err != nil {
+ t.Fatalf("Failed generating key [%s]", err)
+ }
+
+ // Decrypt
+ aCipher, err := spi.NewAsymmetricCipherFromPublicKey(key)
+ if err != nil {
+ t.Fatalf("Failed creating AsymCipher from private key [%s]", err)
+ }
+ _, err = aCipher.Process(nil)
+ if err == nil {
+ t.Fatalf("Decrypting nil should fail")
+ }
+
+ _, err = aCipher.Process([]byte{0, 1, 2, 3})
+ if err == nil {
+ t.Fatalf("Decrypting invalid ciphertxt should fail")
+ }
+
+}
+
+func TestPrivateKeySerialization(t *testing.T) {
+ spi := NewSPI()
+
+ aKey, err := spi.NewDefaultPrivateKey(rand.Reader)
+ if err != nil {
+ t.Fatalf("Failed generating key [%s]", err)
+ }
+
+ bytes, err := spi.SerializePrivateKey(aKey)
+ if err != nil {
+ t.Fatalf("Failed serializing private key [%s]", err)
+ }
+
+ recoveredKey, err := spi.DeserializePrivateKey(bytes)
+ if err != nil {
+ t.Fatalf("Failed serializing private key [%s]", err)
+ }
+
+ // Encrypt
+ aCipher, err := spi.NewAsymmetricCipherFromPublicKey(aKey.GetPublicKey())
+ if err != nil {
+ t.Fatalf("Failed creating AsymCipher from public key [%s]", err)
+ }
+ msg := []byte("Hello World")
+ ct, err := aCipher.Process(msg)
+ if err != nil {
+ t.Fatalf("Failed encrypting [%s]", err)
+ }
+
+ // Decrypt
+ aCipher, err = spi.NewAsymmetricCipherFromPublicKey(recoveredKey)
+ if err != nil {
+ t.Fatalf("Failed creating AsymCipher from private key [%s]", err)
+ }
+ recoveredMsg, err := aCipher.Process(ct)
+ if err != nil {
+ t.Fatalf("Failed decrypting [%s]", err)
+ }
+ if !reflect.DeepEqual(msg, recoveredMsg) {
+ t.Fatalf("Failed decrypting. Output is different [%x][%x]", msg, recoveredMsg)
+ }
+}
+
+func TestPublicKeySerialization(t *testing.T) {
+ spi := NewSPI()
+
+ aKey, err := spi.NewDefaultPrivateKey(rand.Reader)
+ if err != nil {
+ t.Fatalf("Failed generating key [%s]", err)
+ }
+
+ bytes, err := spi.SerializePublicKey(aKey.GetPublicKey())
+ if err != nil {
+ t.Fatalf("Failed serializing private key [%s]", err)
+ }
+
+ pk, err := spi.DeserializePublicKey(bytes)
+ if err != nil {
+ t.Fatalf("Failed serializing private key [%s]", err)
+ }
+
+ // Encrypt
+ aCipher, err := spi.NewAsymmetricCipherFromPublicKey(pk)
+ if err != nil {
+ t.Fatalf("Failed creating AsymCipher from public key [%s]", err)
+ }
+ msg := []byte("Hello World")
+ ct, err := aCipher.Process(msg)
+ if err != nil {
+ t.Fatalf("Failed encrypting [%s]", err)
+ }
+
+ // Decrypt
+ aCipher, err = spi.NewAsymmetricCipherFromPublicKey(aKey)
+ if err != nil {
+ t.Fatalf("Failed creating AsymCipher from private key [%s]", err)
+ }
+ recoveredMsg, err := aCipher.Process(ct)
+ if err != nil {
+ t.Fatalf("Failed decrypting [%s]", err)
+ }
+ if !reflect.DeepEqual(msg, recoveredMsg) {
+ t.Fatalf("Failed decrypting. Output is different [%x][%x]", msg, recoveredMsg)
+ }
+}
diff --git a/core/crypto/primitives/elliptic.go b/core/crypto/primitives/elliptic.go
new file mode 100644
index 00000000000..e9e88cd6f0d
--- /dev/null
+++ b/core/crypto/primitives/elliptic.go
@@ -0,0 +1,30 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package primitives
+
+import (
+ "crypto/elliptic"
+)
+
+var (
+ defaultCurve elliptic.Curve
+)
+
+// GetDefaultCurve returns the default elliptic curve used by the crypto layer
+func GetDefaultCurve() elliptic.Curve {
+ return defaultCurve
+}
diff --git a/core/crypto/primitives/hash.go b/core/crypto/primitives/hash.go
new file mode 100644
index 00000000000..53230108ebb
--- /dev/null
+++ b/core/crypto/primitives/hash.go
@@ -0,0 +1,70 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package primitives
+
+import (
+ "crypto/hmac"
+ "hash"
+)
+
+var (
+ defaultHash func() hash.Hash
+ defaultHashAlgorithm string
+)
+
+// GetDefaultHash returns the default hash function used by the crypto layer
+func GetDefaultHash() func() hash.Hash {
+ return defaultHash
+}
+
+// GetHashAlgorithm returns the default hash algorithm
+func GetHashAlgorithm() string {
+ return defaultHashAlgorithm
+}
+
+// NewHash returns a new hash function
+func NewHash() hash.Hash {
+ return GetDefaultHash()()
+}
+
+// Hash hashes the msg using the predefined hash function
+func Hash(msg []byte) []byte {
+ hash := NewHash()
+ hash.Write(msg)
+ return hash.Sum(nil)
+}
+
+// HMAC hmacs x using key key
+func HMAC(key, x []byte) []byte {
+ mac := hmac.New(GetDefaultHash(), key)
+ mac.Write(x)
+
+ return mac.Sum(nil)
+}
+
+// HMACTruncated hmacs x using key key and truncates the result to truncation bytes
+func HMACTruncated(key, x []byte, truncation int) []byte {
+ mac := hmac.New(GetDefaultHash(), key)
+ mac.Write(x)
+
+ return mac.Sum(nil)[:truncation]
+}
+
+// HMACAESTruncated hmacs x using key key and truncates the result to AESKeyLength bytes
+func HMACAESTruncated(key, x []byte) []byte {
+ return HMACTruncated(key, x, AESKeyLength)
+}
diff --git a/core/crypto/primitives/init.go b/core/crypto/primitives/init.go
new file mode 100644
index 00000000000..b42a4f5f57f
--- /dev/null
+++ b/core/crypto/primitives/init.go
@@ -0,0 +1,87 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package primitives
+
+import (
+ "crypto/elliptic"
+ "crypto/sha256"
+ "crypto/sha512"
+ "fmt"
+ "sync"
+
+ "golang.org/x/crypto/sha3"
+)
+
+var (
+ initOnce sync.Once
+)
+
+// Init SHA2
+func initSHA2(level int) (err error) {
+ switch level {
+ case 256:
+ defaultCurve = elliptic.P256()
+ defaultHash = sha256.New
+ case 384:
+ defaultCurve = elliptic.P384()
+ defaultHash = sha512.New384
+ default:
+ err = fmt.Errorf("Security level not supported [%d]", level)
+ }
+ return
+}
+
+// Init SHA3
+func initSHA3(level int) (err error) {
+ switch level {
+ case 256:
+ defaultCurve = elliptic.P256()
+ defaultHash = sha3.New256
+ case 384:
+ defaultCurve = elliptic.P384()
+ defaultHash = sha3.New384
+ default:
+ err = fmt.Errorf("Security level not supported [%d]", level)
+ }
+ return
+}
+
+// SetSecurityLevel sets the security configuration with the hash length and the algorithm
+func SetSecurityLevel(algorithm string, level int) (err error) {
+ switch algorithm {
+ case "SHA2":
+ err = initSHA2(level)
+ case "SHA3":
+ err = initSHA3(level)
+ default:
+ err = fmt.Errorf("Algorithm not supported [%s]", algorithm)
+ }
+ if err == nil {
+ // TODO: what's this
+ defaultHashAlgorithm = algorithm
+ //hashLength = level
+ }
+ return
+}
+
+// InitSecurityLevel initializes the crypto layer at the given security level
+func InitSecurityLevel(algorithm string, level int) (err error) {
+ initOnce.Do(func() {
+ err = SetSecurityLevel(algorithm, level)
+ })
+ return
+}
diff --git a/core/crypto/primitives/keys.go b/core/crypto/primitives/keys.go
new file mode 100644
index 00000000000..9ab128fbf9c
--- /dev/null
+++ b/core/crypto/primitives/keys.go
@@ -0,0 +1,309 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package primitives
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+
+ "github.com/hyperledger/fabric/core/crypto/utils"
+)
+
+// PrivateKeyToDER marshals an ECDSA private key to its DER (SEC1)
+// encoding. A nil key is rejected with utils.ErrNilArgument.
+func PrivateKeyToDER(privateKey *ecdsa.PrivateKey) ([]byte, error) {
+	if privateKey == nil {
+		return nil, utils.ErrNilArgument
+	}
+	der, err := x509.MarshalECPrivateKey(privateKey)
+	if err != nil {
+		return nil, err
+	}
+	return der, nil
+}
+
+// PrivateKeyToPEM converts a private key to PEM
+func PrivateKeyToPEM(privateKey interface{}, pwd []byte) ([]byte, error) {
+ if len(pwd) != 0 {
+ return PrivateKeyToEncryptedPEM(privateKey, pwd)
+ }
+
+ switch x := privateKey.(type) {
+ case *ecdsa.PrivateKey:
+ raw, err := x509.MarshalECPrivateKey(x)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return pem.EncodeToMemory(
+ &pem.Block{
+ Type: "ECDSA PRIVATE KEY",
+ Bytes: raw,
+ },
+ ), nil
+ default:
+ return nil, utils.ErrInvalidKey
+ }
+}
+
+// PrivateKeyToEncryptedPEM converts a private key to an encrypted PEM
+func PrivateKeyToEncryptedPEM(privateKey interface{}, pwd []byte) ([]byte, error) {
+ switch x := privateKey.(type) {
+ case *ecdsa.PrivateKey:
+ raw, err := x509.MarshalECPrivateKey(x)
+
+ if err != nil {
+ return nil, err
+ }
+
+ block, err := x509.EncryptPEMBlock(
+ rand.Reader,
+ "ECDSA PRIVATE KEY",
+ raw,
+ pwd,
+ x509.PEMCipherAES256)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return pem.EncodeToMemory(block), nil
+
+ default:
+ return nil, utils.ErrInvalidKey
+ }
+}
+
+// DERToPrivateKey unmarshals a der to private key.
+// It tries, in order: PKCS#1 (RSA), PKCS#8 (accepting only RSA or EC
+// payloads), and SEC1 EC encodings, returning the first that parses.
+// The order matters: each parser is used as a fallback for the previous
+// one's failure.
+func DERToPrivateKey(der []byte) (key interface{}, err error) {
+	if key, err = x509.ParsePKCS1PrivateKey(der); err == nil {
+		return key, nil
+	}
+	if key, err = x509.ParsePKCS8PrivateKey(der); err == nil {
+		switch key.(type) {
+		case *rsa.PrivateKey, *ecdsa.PrivateKey:
+			return
+		default:
+			// PKCS#8 parsed but wrapped a key type we do not handle.
+			return nil, errors.New("Found unknown private key type in PKCS#8 wrapping")
+		}
+	}
+	if key, err = x509.ParseECPrivateKey(der); err == nil {
+		return
+	}
+
+	return nil, errors.New("Failed to parse private key")
+}
+
+// PEMtoPrivateKey unmarshals a pem to private key.
+// If the PEM block is encrypted, pwd must be non-empty and is used to
+// decrypt it before the DER payload is parsed via DERToPrivateKey.
+func PEMtoPrivateKey(raw []byte, pwd []byte) (interface{}, error) {
+	if len(raw) == 0 {
+		return nil, utils.ErrNilArgument
+	}
+	block, _ := pem.Decode(raw)
+	if block == nil {
+		return nil, fmt.Errorf("Failed decoding [% x]", raw)
+	}
+
+	// TODO: derive from header the type of the key
+
+	if x509.IsEncryptedPEMBlock(block) {
+		if len(pwd) == 0 {
+			return nil, errors.New("Encrypted Key. Need a password!!!")
+		}
+
+		decrypted, err := x509.DecryptPEMBlock(block, pwd)
+		if err != nil {
+			return nil, errors.New("Failed decryption!!!")
+		}
+
+		key, err := DERToPrivateKey(decrypted)
+		if err != nil {
+			return nil, err
+		}
+		return key, err
+	}
+
+	cert, err := DERToPrivateKey(block.Bytes)
+	if err != nil {
+		return nil, err
+	}
+	return cert, err
+}
+
+// PEMtoAES extracts from the PEM an AES key.
+// For an encrypted block, pwd must be non-empty and the decrypted bytes
+// are returned; for a plain block the raw bytes are returned as-is.
+func PEMtoAES(raw []byte, pwd []byte) ([]byte, error) {
+	if len(raw) == 0 {
+		return nil, utils.ErrNilArgument
+	}
+	block, _ := pem.Decode(raw)
+	if block == nil {
+		return nil, fmt.Errorf("Failed decoding [% x]", raw)
+	}
+
+	if x509.IsEncryptedPEMBlock(block) {
+		if len(pwd) == 0 {
+			return nil, errors.New("Encrypted Key. Need a password!!!")
+		}
+
+		decrypted, err := x509.DecryptPEMBlock(block, pwd)
+		if err != nil {
+			return nil, err
+		}
+		return decrypted, nil
+	}
+
+	return block.Bytes, nil
+}
+
+// AEStoPEM encapsulates an AES key in an (unencrypted) PEM block of
+// type "AES PRIVATE KEY".
+func AEStoPEM(raw []byte) []byte {
+	block := &pem.Block{Type: "AES PRIVATE KEY", Bytes: raw}
+	return pem.EncodeToMemory(block)
+}
+
+// AEStoEncryptedPEM encapsulates an AES key in the encrypted PEM format
+// (AES-256 under pwd). With an empty password it falls back to the
+// plain PEM wrapping produced by AEStoPEM.
+func AEStoEncryptedPEM(raw []byte, pwd []byte) ([]byte, error) {
+	if len(pwd) == 0 {
+		return AEStoPEM(raw), nil
+	}
+
+	block, err := x509.EncryptPEMBlock(
+		rand.Reader,
+		"AES PRIVATE KEY",
+		raw,
+		pwd,
+		x509.PEMCipherAES256)
+	if err != nil {
+		return nil, err
+	}
+	return pem.EncodeToMemory(block), nil
+}
+
+/*
+func PublicKeyToDER(publicKey interface{}) ([]byte, error) {
+ return x509.MarshalPKIXPublicKey(publicKey)
+}
+
+func DERToPublicKey(derBytes []byte) (pub interface{}, err error) {
+ key, err := x509.ParsePKIXPublicKey(derBytes)
+
+ return key, err
+}
+*/
+
+// PublicKeyToPEM marshals a public key to the pem forma
+func PublicKeyToPEM(publicKey interface{}, pwd []byte) ([]byte, error) {
+ if len(pwd) != 0 {
+ return PublicKeyToEncryptedPEM(publicKey, pwd)
+ }
+
+ switch x := publicKey.(type) {
+ case *ecdsa.PublicKey:
+ PubASN1, err := x509.MarshalPKIXPublicKey(x)
+ if err != nil {
+ return nil, err
+ }
+
+ return pem.EncodeToMemory(
+ &pem.Block{
+ Type: "ECDSA PUBLIC KEY",
+ Bytes: PubASN1,
+ },
+ ), nil
+
+ default:
+ return nil, utils.ErrInvalidKey
+ }
+}
+
+// PublicKeyToEncryptedPEM converts a public key to encrypted pem
+func PublicKeyToEncryptedPEM(publicKey interface{}, pwd []byte) ([]byte, error) {
+ switch x := publicKey.(type) {
+ case *ecdsa.PublicKey:
+ raw, err := x509.MarshalPKIXPublicKey(x)
+
+ if err != nil {
+ return nil, err
+ }
+
+ block, err := x509.EncryptPEMBlock(
+ rand.Reader,
+ "ECDSA PUBLIC KEY",
+ raw,
+ pwd,
+ x509.PEMCipherAES256)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return pem.EncodeToMemory(block), nil
+
+ default:
+ return nil, utils.ErrInvalidKey
+ }
+}
+
+// PEMtoPublicKey unmarshals a pem to public key.
+// If the PEM block is encrypted, pwd must be non-empty and is used to
+// decrypt it before the DER payload is parsed via DERToPublicKey.
+func PEMtoPublicKey(raw []byte, pwd []byte) (interface{}, error) {
+	if len(raw) == 0 {
+		return nil, utils.ErrNilArgument
+	}
+	block, _ := pem.Decode(raw)
+	if block == nil {
+		return nil, fmt.Errorf("Failed decoding [% x]", raw)
+	}
+
+	// TODO: derive from header the type of the key
+	if x509.IsEncryptedPEMBlock(block) {
+		if len(pwd) == 0 {
+			return nil, errors.New("Encrypted Key. Need a password!!!")
+		}
+
+		decrypted, err := x509.DecryptPEMBlock(block, pwd)
+		if err != nil {
+			return nil, errors.New("Failed decryption!!!")
+		}
+
+		key, err := DERToPublicKey(decrypted)
+		if err != nil {
+			return nil, err
+		}
+		return key, err
+	}
+
+	cert, err := DERToPublicKey(block.Bytes)
+	if err != nil {
+		return nil, err
+	}
+	return cert, err
+}
+
+// DERToPublicKey unmarshals a DER-encoded (PKIX) public key.
+func DERToPublicKey(derBytes []byte) (pub interface{}, err error) {
+	return x509.ParsePKIXPublicKey(derBytes)
+}
diff --git a/core/crypto/primitives/primitives_test.go b/core/crypto/primitives/primitives_test.go
new file mode 100644
index 00000000000..4ed166555b2
--- /dev/null
+++ b/core/crypto/primitives/primitives_test.go
@@ -0,0 +1,421 @@
+package primitives
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "encoding/asn1"
+ "fmt"
+ "math/big"
+ "os"
+ "reflect"
+ "testing"
+)
+
+// TestParameters bundles one (hash family, security level) combination
+// under which the whole suite is executed.
+type TestParameters struct {
+	hashFamily    string // "SHA2" or "SHA3"
+	securityLevel int    // 256 or 384
+}
+
+// String renders the parameters as e.g. "SHA3-256".
+func (t *TestParameters) String() string {
+	// fmt.Sprintf prints the level as decimal digits; the previous
+	// string(t.securityLevel) conversion produced the Unicode code
+	// point for the value (e.g. 256 -> "Ā"), not "256".
+	return fmt.Sprintf("%s-%d", t.hashFamily, t.securityLevel)
+}
+
+var testParametersSet = []*TestParameters{
+ &TestParameters{"SHA3", 256},
+ &TestParameters{"SHA3", 384},
+ &TestParameters{"SHA2", 256},
+ &TestParameters{"SHA2", 384}}
+
+// TestMain runs the full suite once per parameter set, via both the
+// InitSecurityLevel and SetSecurityLevel entry points, and exits with a
+// non-zero status as soon as any run fails. The original discarded
+// m.Run()'s return value and always called os.Exit(0), so test
+// failures were reported as success.
+func TestMain(m *testing.M) {
+	for _, params := range testParametersSet {
+		// Note: InitSecurityLevel only configures on the first call;
+		// subsequent iterations rely on SetSecurityLevel below.
+		err := InitSecurityLevel(params.hashFamily, params.securityLevel)
+		if err != nil {
+			panic(fmt.Errorf("Failed initiliazing crypto layer at [%s]", params.String()))
+		}
+		if ret := m.Run(); ret != 0 {
+			// Propagate the failure instead of discarding the exit code.
+			os.Exit(ret)
+		}
+
+		err = SetSecurityLevel(params.hashFamily, params.securityLevel)
+		if err != nil {
+			panic(fmt.Errorf("Failed initiliazing crypto layer at [%s]", params.String()))
+		}
+		if ret := m.Run(); ret != 0 {
+			os.Exit(ret)
+		}
+
+		if GetHashAlgorithm() != params.hashFamily {
+			panic(fmt.Errorf("Failed initiliazing crypto layer. Invalid Hash family [%s][%s]", GetHashAlgorithm(), params.hashFamily))
+		}
+
+	}
+	os.Exit(0)
+}
+
+// TestInitSecurityLevel checks that invalid (algorithm, level)
+// combinations are rejected by SetSecurityLevel.
+func TestInitSecurityLevel(t *testing.T) {
+	// Valid algorithm, unsupported level.
+	err := SetSecurityLevel("SHA2", 1024)
+	if err == nil {
+		t.Fatalf("Initialization should fail")
+	}
+
+	// Unknown algorithm name.
+	err = SetSecurityLevel("SHA", 1024)
+	if err == nil {
+		t.Fatalf("Initialization should fail")
+	}
+
+	// Valid algorithm, unsupported level.
+	err = SetSecurityLevel("SHA3", 2048)
+	if err == nil {
+		t.Fatalf("Initialization should fail")
+	}
+}
+
+// TestECDSA exercises sign/verify round trips over random messages of
+// random length (1..1024 bytes): a valid signature must verify, a
+// tampered message or truncated signature must not, and the
+// direct (R,S) signing path must produce a signature that verifies
+// after ASN.1 marshalling.
+func TestECDSA(t *testing.T) {
+
+	key, err := NewECDSAKey()
+	if err != nil {
+		t.Fatalf("Failed generating ECDSA key [%s]", err)
+	}
+
+	for i := 1; i < 100; i++ {
+		length, err := rand.Int(rand.Reader, big.NewInt(1024))
+		if err != nil {
+			t.Fatalf("Failed generating AES key [%s]", err)
+		}
+		msg, err := GetRandomBytes(int(length.Int64()) + 1)
+		if err != nil {
+			t.Fatalf("Failed generating AES key [%s]", err)
+		}
+
+		sigma, err := ECDSASign(key, msg)
+		if err != nil {
+			t.Fatalf("Failed signing [%s]", err)
+		}
+
+		// Round trip: the signature must verify against the message.
+		ok, err := ECDSAVerify(key.Public(), msg, sigma)
+		if err != nil {
+			t.Fatalf("Failed verifying [%s]", err)
+		}
+		if !ok {
+			t.Fatalf("Failed verification.")
+		}
+
+		// Verification against a truncated message must fail.
+		ok, err = ECDSAVerify(key.Public(), msg[:len(msg)-1], sigma)
+		if err != nil {
+			t.Fatalf("Failed verifying [%s]", err)
+		}
+		if ok {
+			t.Fatalf("Verification should fail.")
+		}
+
+		// A truncated signature must fail as well.
+		ok, err = ECDSAVerify(key.Public(), msg[:1], sigma[:1])
+		if err != nil {
+			t.Fatalf("Failed verifying [%s]", err)
+		}
+		if ok {
+			t.Fatalf("Verification should fail.")
+		}
+
+		// Direct signing returns (R,S); re-marshal and verify.
+		R, S, err := ECDSASignDirect(key, msg)
+		if err != nil {
+			t.Fatalf("Failed signing (direct) [%s]", err)
+		}
+		if sigma, err = asn1.Marshal(ECDSASignature{R, S}); err != nil {
+			t.Fatalf("Failed marshalling (R,S) [%s]", err)
+		}
+		ok, err = ECDSAVerify(key.Public(), msg, sigma)
+		if err != nil {
+			t.Fatalf("Failed verifying [%s]", err)
+		}
+		if !ok {
+			t.Fatalf("Failed verification.")
+		}
+	}
+}
+
+// TestECDSAKeys round-trips an ECDSA key through every serialization
+// in keys.go (DER, PEM, encrypted PEM, for both the private and public
+// half), checking D/X/Y survive each conversion, and verifies the
+// error paths for nil/invalid inputs and wrong passwords.
+func TestECDSAKeys(t *testing.T) {
+	key, err := NewECDSAKey()
+	if err != nil {
+		t.Fatalf("Failed generating ECDSA key [%s]", err)
+	}
+
+	// Private Key DER format
+	der, err := PrivateKeyToDER(key)
+	if err != nil {
+		t.Fatalf("Failed converting private key to DER [%s]", err)
+	}
+	keyFromDER, err := DERToPrivateKey(der)
+	if err != nil {
+		t.Fatalf("Failed converting DER to private key [%s]", err)
+	}
+	ecdsaKeyFromDer := keyFromDER.(*ecdsa.PrivateKey)
+	// TODO: check the curve
+	if key.D.Cmp(ecdsaKeyFromDer.D) != 0 {
+		t.Fatalf("Failed converting DER to private key. Invalid D.")
+	}
+	if key.X.Cmp(ecdsaKeyFromDer.X) != 0 {
+		t.Fatalf("Failed converting DER to private key. Invalid X coordinate.")
+	}
+	if key.Y.Cmp(ecdsaKeyFromDer.Y) != 0 {
+		t.Fatalf("Failed converting DER to private key. Invalid Y coordinate.")
+	}
+
+	// Private Key PEM format
+	pem, err := PrivateKeyToPEM(key, nil)
+	if err != nil {
+		t.Fatalf("Failed converting private key to PEM [%s]", err)
+	}
+	keyFromPEM, err := PEMtoPrivateKey(pem, nil)
+	if err != nil {
+		t.Fatalf("Failed converting DER to private key [%s]", err)
+	}
+	ecdsaKeyFromPEM := keyFromPEM.(*ecdsa.PrivateKey)
+	// TODO: check the curve
+	if key.D.Cmp(ecdsaKeyFromPEM.D) != 0 {
+		t.Fatalf("Failed converting PEM to private key. Invalid D.")
+	}
+	if key.X.Cmp(ecdsaKeyFromPEM.X) != 0 {
+		t.Fatalf("Failed converting PEM to private key. Invalid X coordinate.")
+	}
+	if key.Y.Cmp(ecdsaKeyFromPEM.Y) != 0 {
+		t.Fatalf("Failed converting PEM to private key. Invalid Y coordinate.")
+	}
+
+	// Nil Private Key <-> PEM: all conversions must reject nil/garbage.
+	_, err = PrivateKeyToPEM(nil, nil)
+	if err == nil {
+		t.Fatalf("PublicKeyToPEM should fail on nil")
+	}
+
+	_, err = PEMtoPrivateKey(nil, nil)
+	if err == nil {
+		t.Fatalf("PEMtoPublicKey should fail on nil")
+	}
+
+	_, err = PEMtoPrivateKey([]byte{0, 1, 3, 4}, nil)
+	if err == nil {
+		t.Fatalf("PEMtoPublicKey should fail invalid PEM")
+	}
+
+	_, err = DERToPrivateKey(nil)
+	if err == nil {
+		t.Fatalf("DERToPrivateKey should fail on nil")
+	}
+
+	_, err = DERToPrivateKey([]byte{0, 1, 3, 4})
+	if err == nil {
+		t.Fatalf("DERToPrivateKey should fail on invalid DER")
+	}
+
+	_, err = PrivateKeyToDER(nil)
+	if err == nil {
+		t.Fatalf("DERToPrivateKey should fail on nil")
+	}
+
+	// Private Key Encrypted PEM format
+	encPEM, err := PrivateKeyToPEM(key, []byte("passwd"))
+	if err != nil {
+		t.Fatalf("Failed converting private key to encrypted PEM [%s]", err)
+	}
+	encKeyFromPEM, err := PEMtoPrivateKey(encPEM, []byte("passwd"))
+	if err != nil {
+		t.Fatalf("Failed converting DER to private key [%s]", err)
+	}
+	ecdsaKeyFromEncPEM := encKeyFromPEM.(*ecdsa.PrivateKey)
+	// TODO: check the curve
+	if key.D.Cmp(ecdsaKeyFromEncPEM.D) != 0 {
+		t.Fatalf("Failed converting encrypted PEM to private key. Invalid D.")
+	}
+	if key.X.Cmp(ecdsaKeyFromEncPEM.X) != 0 {
+		t.Fatalf("Failed converting encrypted PEM to private key. Invalid X coordinate.")
+	}
+	if key.Y.Cmp(ecdsaKeyFromEncPEM.Y) != 0 {
+		t.Fatalf("Failed converting encrypted PEM to private key. Invalid Y coordinate.")
+	}
+
+	// Public Key PEM format
+	pem, err = PublicKeyToPEM(&key.PublicKey, nil)
+	if err != nil {
+		t.Fatalf("Failed converting public key to PEM [%s]", err)
+	}
+	keyFromPEM, err = PEMtoPublicKey(pem, nil)
+	if err != nil {
+		t.Fatalf("Failed converting DER to public key [%s]", err)
+	}
+	ecdsaPkFromPEM := keyFromPEM.(*ecdsa.PublicKey)
+	// TODO: check the curve
+	if key.X.Cmp(ecdsaPkFromPEM.X) != 0 {
+		t.Fatalf("Failed converting PEM to private key. Invalid X coordinate.")
+	}
+	if key.Y.Cmp(ecdsaPkFromPEM.Y) != 0 {
+		t.Fatalf("Failed converting PEM to private key. Invalid Y coordinate.")
+	}
+
+	// Nil Public Key <-> PEM
+	_, err = PublicKeyToPEM(nil, nil)
+	if err == nil {
+		t.Fatalf("PublicKeyToPEM should fail on nil")
+	}
+
+	_, err = PEMtoPublicKey(nil, nil)
+	if err == nil {
+		t.Fatalf("PEMtoPublicKey should fail on nil")
+	}
+
+	_, err = PEMtoPublicKey([]byte{0, 1, 3, 4}, nil)
+	if err == nil {
+		t.Fatalf("PEMtoPublicKey should fail on invalid PEM")
+	}
+
+	// Public Key Encrypted PEM format
+	encPEM, err = PublicKeyToPEM(&key.PublicKey, []byte("passwd"))
+	if err != nil {
+		t.Fatalf("Failed converting private key to encrypted PEM [%s]", err)
+	}
+	pkFromEncPEM, err := PEMtoPublicKey(encPEM, []byte("passwd"))
+	if err != nil {
+		t.Fatalf("Failed converting DER to private key [%s]", err)
+	}
+	ecdsaPkFromEncPEM := pkFromEncPEM.(*ecdsa.PublicKey)
+	// TODO: check the curve
+	if key.X.Cmp(ecdsaPkFromEncPEM.X) != 0 {
+		t.Fatalf("Failed converting encrypted PEM to private key. Invalid X coordinate.")
+	}
+	if key.Y.Cmp(ecdsaPkFromEncPEM.Y) != 0 {
+		t.Fatalf("Failed converting encrypted PEM to private key. Invalid Y coordinate.")
+	}
+
+	// Wrong or missing password on an encrypted block must fail.
+	_, err = PEMtoPublicKey(encPEM, []byte("passw"))
+	if err == nil {
+		t.Fatalf("PEMtoPublicKey should fail on wrong password")
+	}
+
+	_, err = PEMtoPublicKey(encPEM, []byte("passw"))
+	if err == nil {
+		t.Fatalf("PEMtoPublicKey should fail on nil password")
+	}
+
+	_, err = PEMtoPublicKey(nil, []byte("passwd"))
+	if err == nil {
+		t.Fatalf("PEMtoPublicKey should fail on nil PEM")
+	}
+
+	_, err = PEMtoPublicKey([]byte{0, 1, 3, 4}, []byte("passwd"))
+	if err == nil {
+		t.Fatalf("PEMtoPublicKey should fail on invalid PEM")
+	}
+
+	_, err = PEMtoPublicKey(nil, []byte("passw"))
+	if err == nil {
+		t.Fatalf("PEMtoPublicKey should fail on nil PEM and wrong password")
+	}
+}
+
+// TestRandom checks that GetRandomNonce returns a nonce of exactly
+// NonceSize bytes.
+func TestRandom(t *testing.T) {
+	nonce, err := GetRandomNonce()
+	if err != nil {
+		t.Fatalf("Failed getting nonce [%s]", err)
+	}
+
+	if len(nonce) != NonceSize {
+		t.Fatalf("Invalid nonce size. Expecting [%d], was [%d]", NonceSize, len(nonce))
+	}
+}
+
+// TestHMAC checks that the truncated HMAC helpers agree with each
+// other and with a plain HMAC truncated to AESKeyLength, over random
+// messages of random length (1..1024 bytes).
+func TestHMAC(t *testing.T) {
+	key, err := GenAESKey()
+	if err != nil {
+		t.Fatalf("Failed generating AES key [%s]", err)
+	}
+
+	for i := 1; i < 100; i++ {
+		// Renamed from "len" — the original shadowed the builtin len,
+		// which is legal but error-prone inside this loop body.
+		msgLen, err := rand.Int(rand.Reader, big.NewInt(1024))
+		if err != nil {
+			t.Fatalf("Failed generating AES key [%s]", err)
+		}
+		msg, err := GetRandomBytes(int(msgLen.Int64()) + 1)
+		if err != nil {
+			t.Fatalf("Failed generating AES key [%s]", err)
+		}
+
+		out1 := HMACAESTruncated(key, msg)
+		out2 := HMACTruncated(key, msg, AESKeyLength)
+		out3 := HMAC(key, msg)
+
+		if !reflect.DeepEqual(out1, out2) {
+			t.Fatalf("Wrong hmac output [%x][%x]", out1, out2)
+		}
+		if !reflect.DeepEqual(out2, out3[:AESKeyLength]) {
+			t.Fatalf("Wrong hmac output [%x][%x]", out1, out2)
+		}
+
+	}
+
+}
+
+// TestX509 round-trips a self-signed certificate through every
+// conversion in x509.go (PEM <-> DER <-> x509.Certificate), verifies
+// the certificate's public key against its private key, and checks
+// that mismatched input formats are rejected.
+func TestX509(t *testing.T) {
+
+	// Generate a self signed cert
+	der, key, err := NewSelfSignedCert()
+	if err != nil {
+		t.Fatalf("Failed genereting self signed cert")
+	}
+
+	// Test DERCertToPEM
+	pem := DERCertToPEM(der)
+	certFromPEM, derFromPem, err := PEMtoCertificateAndDER(pem)
+	if err != nil {
+		t.Fatalf("Failed converting PEM to (x509, DER) [%s]", err)
+	}
+	if !reflect.DeepEqual(certFromPEM.Raw, der) {
+		t.Fatalf("Invalid der from PEM [%x][%x]", der, certFromPEM.Raw)
+	}
+	if !reflect.DeepEqual(der, derFromPem) {
+		t.Fatalf("Invalid der from PEM [%x][%x]", der, derFromPem)
+	}
+
+	if err := CheckCertPKAgainstSK(certFromPEM, key); err != nil {
+		t.Fatalf("Failed checking cert vk against sk [%s]", err)
+	}
+
+	// Test PEMtoDER
+	if derFromPem, err = PEMtoDER(pem); err != nil {
+		t.Fatalf("Failed converting PEM to (DER) [%s]", err)
+	}
+	if !reflect.DeepEqual(der, derFromPem) {
+		t.Fatalf("Invalid der from PEM [%x][%x]", der, derFromPem)
+	}
+
+	// Test PEMtoCertificate
+	if certFromPEM, err = PEMtoCertificate(pem); err != nil {
+		t.Fatalf("Failed converting PEM to (x509) [%s]", err)
+	}
+	if !reflect.DeepEqual(certFromPEM.Raw, der) {
+		t.Fatalf("Invalid der from PEM [%x][%x]", der, certFromPEM.Raw)
+	}
+	if err := CheckCertPKAgainstSK(certFromPEM, key); err != nil {
+		t.Fatalf("Failed checking cert vk against sk [%s]", err)
+	}
+
+	// Test DERToX509Certificate
+	if certFromPEM, err = DERToX509Certificate(der); err != nil {
+		t.Fatalf("Failed converting DER to (x509) [%s]", err)
+	}
+	if !reflect.DeepEqual(certFromPEM.Raw, der) {
+		t.Fatalf("Invalid x509 from PEM [%x][%x]", der, certFromPEM.Raw)
+	}
+	if err := CheckCertPKAgainstSK(certFromPEM, key); err != nil {
+		t.Fatalf("Failed checking cert vk against sk [%s]", err)
+	}
+
+	// Test errors: feeding the wrong encoding to each parser must fail.
+	if _, err = DERToX509Certificate(pem); err == nil {
+		t.Fatalf("Converting DER to (x509) should fail on PEM [%s]", err)
+	}
+
+	if _, err = PEMtoCertificate(der); err == nil {
+		t.Fatalf("Converting PEM to (x509) should fail on DER [%s]", err)
+	}
+
+	// With the public key removed, the PK/SK check must fail.
+	certFromPEM.PublicKey = nil
+	if err := CheckCertPKAgainstSK(certFromPEM, key); err == nil {
+		t.Fatalf("Checking cert vk against sk shoud failed. Invalid VK [%s]", err)
+	}
+}
diff --git a/core/crypto/primitives/random.go b/core/crypto/primitives/random.go
new file mode 100644
index 00000000000..43f4e6655cf
--- /dev/null
+++ b/core/crypto/primitives/random.go
@@ -0,0 +1,37 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package primitives
+
+import "crypto/rand"
+
+// GetRandomBytes returns size random looking bytes drawn from
+// crypto/rand.
+func GetRandomBytes(size int) ([]byte, error) {
+	// Renamed the parameter from "len", which shadowed the builtin.
+	key := make([]byte, size)
+
+	// crypto/rand.Read returns err == nil only when the buffer has been
+	// completely filled, so no short-read handling is needed here (the
+	// old TODO suggesting otherwise was incorrect).
+	_, err := rand.Read(key)
+	if err != nil {
+		return nil, err
+	}
+
+	return key, nil
+}
+
+// GetRandomNonce returns a random byte array of length NonceSize,
+// drawn from crypto/rand via GetRandomBytes.
+func GetRandomNonce() ([]byte, error) {
+	return GetRandomBytes(NonceSize)
+}
diff --git a/core/crypto/primitives/x509.go b/core/crypto/primitives/x509.go
new file mode 100644
index 00000000000..977ad913f38
--- /dev/null
+++ b/core/crypto/primitives/x509.go
@@ -0,0 +1,257 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package primitives
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/pem"
+ "errors"
+ "math/big"
+ "net"
+ "time"
+
+ "github.com/hyperledger/fabric/core/crypto/utils"
+)
+
+var (
+	// TCertEncTCertIndex oid for TCertIndex
+	TCertEncTCertIndex = asn1.ObjectIdentifier{1, 2, 3, 4, 5, 6, 7}
+
+	// TCertEncEnrollmentID is the ASN1 object identifier of the enrollment ID.
+	TCertEncEnrollmentID = asn1.ObjectIdentifier{1, 2, 3, 4, 5, 6, 8}
+
+	// TCertEncAttributesBase is the base ASN1 object identifier for attributes.
+	// When generating an extension to include the attribute an index will be
+	// appended to this Object Identifier.
+	TCertEncAttributesBase = asn1.ObjectIdentifier{1, 2, 3, 4, 5, 6}
+
+	// TCertAttributesHeaders is the ASN1 object identifier of attributes header.
+	TCertAttributesHeaders = asn1.ObjectIdentifier{1, 2, 3, 4, 5, 6, 9}
+)
+
+// DERToX509Certificate converts der to x509. It is a thin wrapper
+// around x509.ParseCertificate.
+func DERToX509Certificate(asn1Data []byte) (*x509.Certificate, error) {
+	return x509.ParseCertificate(asn1Data)
+}
+
+// PEMtoCertificate converts a PEM-encoded certificate to an
+// x509.Certificate. The input must be a headerless block of type
+// "CERTIFICATE".
+func PEMtoCertificate(raw []byte) (*x509.Certificate, error) {
+	block, _ := pem.Decode(raw)
+	if block == nil {
+		return nil, errors.New("No PEM block available")
+	}
+	if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+		return nil, errors.New("Not a valid CERTIFICATE PEM block")
+	}
+	return x509.ParseCertificate(block.Bytes)
+}
+
+// PEMtoDER extracts the DER payload from a PEM-encoded certificate.
+// The input must be a headerless block of type "CERTIFICATE".
+func PEMtoDER(raw []byte) ([]byte, error) {
+	block, _ := pem.Decode(raw)
+	if block == nil {
+		return nil, errors.New("No PEM block available")
+	}
+	if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+		return nil, errors.New("Not a valid CERTIFICATE PEM block")
+	}
+	return block.Bytes, nil
+}
+
+// PEMtoCertificateAndDER converts pem to x509 and der.
+// It returns both the parsed certificate and its raw DER bytes; the
+// input must be a headerless "CERTIFICATE" PEM block.
+func PEMtoCertificateAndDER(raw []byte) (*x509.Certificate, []byte, error) {
+	block, _ := pem.Decode(raw)
+	if block == nil {
+		return nil, nil, errors.New("No PEM block available")
+	}
+
+	if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+		return nil, nil, errors.New("Not a valid CERTIFICATE PEM block")
+	}
+
+	cert, err := x509.ParseCertificate(block.Bytes)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return cert, block.Bytes, nil
+}
+
+// DERCertToPEM wraps DER certificate bytes in a "CERTIFICATE" PEM
+// block.
+func DERCertToPEM(der []byte) []byte {
+	block := &pem.Block{Type: "CERTIFICATE", Bytes: der}
+	return pem.EncodeToMemory(block)
+}
+
+// GetCriticalExtension returns a requested critical extension. It also remove it from the list
+// of unhandled critical extensions.
+// Note that it mutates cert: if oid is found among
+// UnhandledCriticalExtensions it is removed from that slice, then the
+// value is looked up in cert.Extensions. An error is returned if the
+// extension is not present there.
+func GetCriticalExtension(cert *x509.Certificate, oid asn1.ObjectIdentifier) ([]byte, error) {
+	for i, ext := range cert.UnhandledCriticalExtensions {
+		if utils.IntArrayEquals(ext, oid) {
+			// Remove the matching entry; break immediately so the
+			// iteration never observes the mutated slice.
+			cert.UnhandledCriticalExtensions = append(cert.UnhandledCriticalExtensions[:i], cert.UnhandledCriticalExtensions[i+1:]...)
+
+			break
+		}
+	}
+
+	for _, ext := range cert.Extensions {
+		if utils.IntArrayEquals(ext.Id, oid) {
+			return ext.Value, nil
+		}
+	}
+
+	return nil, errors.New("Failed retrieving extension.")
+}
+
+// NewSelfSignedCert create a self signed certificate.
+// It generates a fresh ECDSA key and signs a fixed, fully-populated
+// template (test.example.com, 2h validity window centred on now) with
+// it, returning the DER-encoded certificate and the private key.
+func NewSelfSignedCert() ([]byte, interface{}, error) {
+	privKey, err := NewECDSAKey()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	testExtKeyUsage := []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}
+	testUnknownExtKeyUsage := []asn1.ObjectIdentifier{[]int{1, 2, 3}, []int{2, 59, 1}}
+	extraExtensionData := []byte("extra extension")
+	commonName := "test.example.com"
+	template := x509.Certificate{
+		SerialNumber: big.NewInt(1),
+		Subject: pkix.Name{
+			CommonName:   commonName,
+			Organization: []string{"Σ Acme Co"},
+			Country:      []string{"US"},
+			ExtraNames: []pkix.AttributeTypeAndValue{
+				{
+					Type:  []int{2, 5, 4, 42},
+					Value: "Gopher",
+				},
+				// This should override the Country, above.
+				{
+					Type:  []int{2, 5, 4, 6},
+					Value: "NL",
+				},
+			},
+		},
+		NotBefore: time.Now().Add(-1 * time.Hour),
+		NotAfter:  time.Now().Add(1 * time.Hour),
+
+		SignatureAlgorithm: x509.ECDSAWithSHA384,
+
+		SubjectKeyId: []byte{1, 2, 3, 4},
+		KeyUsage:     x509.KeyUsageCertSign,
+
+		ExtKeyUsage:        testExtKeyUsage,
+		UnknownExtKeyUsage: testUnknownExtKeyUsage,
+
+		BasicConstraintsValid: true,
+		IsCA: true,
+
+		OCSPServer:            []string{"http://ocsp.example.com"},
+		IssuingCertificateURL: []string{"http://crt.example.com/ca1.crt"},
+
+		DNSNames:       []string{"test.example.com"},
+		EmailAddresses: []string{"gopher@golang.org"},
+		IPAddresses:    []net.IP{net.IPv4(127, 0, 0, 1).To4(), net.ParseIP("2001:4860:0:2001::68")},
+
+		PolicyIdentifiers:   []asn1.ObjectIdentifier{[]int{1, 2, 3}},
+		PermittedDNSDomains: []string{".example.com", "example.com"},
+
+		CRLDistributionPoints: []string{"http://crl1.example.com/ca1.crl", "http://crl2.example.com/ca1.crl"},
+
+		ExtraExtensions: []pkix.Extension{
+			{
+				Id:    []int{1, 2, 3, 4},
+				Value: extraExtensionData,
+			},
+		},
+	}
+
+	// Self-signed: the template acts as both subject and issuer.
+	cert, err := x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return cert, privKey, nil
+}
+
+// CheckCertPKAgainstSK checks certificate's publickey against the passed secret key.
+// For RSA the moduli must match; for ECDSA the (X, Y) coordinates must
+// match. Any other public key algorithm is rejected.
+func CheckCertPKAgainstSK(x509Cert *x509.Certificate, privateKey interface{}) error {
+	switch pub := x509Cert.PublicKey.(type) {
+	case *rsa.PublicKey:
+		priv, ok := privateKey.(*rsa.PrivateKey)
+		if !ok {
+			return errors.New("Private key type does not match public key type")
+		}
+		if pub.N.Cmp(priv.N) != 0 {
+			return errors.New("Private key does not match public key")
+		}
+	case *ecdsa.PublicKey:
+		priv, ok := privateKey.(*ecdsa.PrivateKey)
+		if !ok {
+			return errors.New("Private key type does not match public key type")
+
+		}
+		if pub.X.Cmp(priv.X) != 0 || pub.Y.Cmp(priv.Y) != 0 {
+			return errors.New("Private key does not match public key")
+		}
+	default:
+		return errors.New("Unknown public key algorithm")
+	}
+
+	return nil
+}
+
+// CheckCertAgainRoot check the validity of the passed certificate against the passed certPool,
+// returning the verified chains produced by x509.Certificate.Verify.
+func CheckCertAgainRoot(x509Cert *x509.Certificate, certPool *x509.CertPool) ([][]*x509.Certificate, error) {
+	opts := x509.VerifyOptions{
+		// TODO DNSName: "test.example.com",
+		Roots: certPool,
+	}
+
+	return x509Cert.Verify(opts)
+}
+
+// CheckCertAgainstSKAndRoot checks the passed certificate against the passed secretkey and certPool:
+// first the public/secret key pairing, then chain validation against
+// the roots. The first failure is returned.
+func CheckCertAgainstSKAndRoot(x509Cert *x509.Certificate, privateKey interface{}, certPool *x509.CertPool) error {
+	if err := CheckCertPKAgainstSK(x509Cert, privateKey); err != nil {
+		return err
+	}
+
+	if _, err := CheckCertAgainRoot(x509Cert, certPool); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/core/crypto/utils/conf.go b/core/crypto/utils/conf.go
new file mode 100644
index 00000000000..00afec0a160
--- /dev/null
+++ b/core/crypto/utils/conf.go
@@ -0,0 +1,49 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+ "fmt"
+
+ "github.com/spf13/viper"
+)
+
+// NodeConfiguration used for testing.
+// It identifies a node whose enrollment credentials are looked up in
+// the viper configuration under "tests.crypto.users.<Name>".
+type NodeConfiguration struct {
+	Type string // node type (as used by the test configuration)
+	Name string // configuration key segment used by the getters below
+}
+
+// GetEnrollmentID returns the enrollment ID read from the viper
+// property "tests.crypto.users.<Name>.enrollid". It panics when the
+// property is missing or empty.
+func (conf *NodeConfiguration) GetEnrollmentID() string {
+	key := "tests.crypto.users." + conf.Name + ".enrollid"
+	value := viper.GetString(key)
+	if value == "" {
+		panic(fmt.Errorf("Enrollment id not specified in configuration file. Please check that property '%s' is set", key))
+	}
+	return value
+}
+
+// GetEnrollmentPWD returns the enrollment password read from the viper
+// property "tests.crypto.users.<Name>.enrollpw". It panics when the
+// property is missing or empty.
+func (conf *NodeConfiguration) GetEnrollmentPWD() string {
+	key := "tests.crypto.users." + conf.Name + ".enrollpw"
+	value := viper.GetString(key)
+	if value == "" {
+		// The message previously said "Enrollment id" — copy-pasted
+		// from GetEnrollmentID; this getter reads the password.
+		panic(fmt.Errorf("Enrollment password not specified in configuration file. Please check that property '%s' is set", key))
+	}
+	return value
+}
diff --git a/core/crypto/utils/errs.go b/core/crypto/utils/errs.go
new file mode 100644
index 00000000000..2e405f332f3
--- /dev/null
+++ b/core/crypto/utils/errs.go
@@ -0,0 +1,96 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import "errors"
+
+// Sentinel errors shared by the crypto layer. Callers compare against
+// these values directly, so their identities (not just messages) are
+// part of the package API.
+var (
+	// ErrRegistrationRequired Registration to the Membership Service required.
+	ErrRegistrationRequired = errors.New("Registration to the Membership Service required.")
+
+	// ErrNotInitialized Initialization required
+	ErrNotInitialized = errors.New("Initialization required.")
+
+	// ErrAlreadyInitialized Already initialized
+	ErrAlreadyInitialized = errors.New("Already initialized.")
+
+	// ErrAlreadyRegistered Already registered
+	ErrAlreadyRegistered = errors.New("Already registered.")
+
+	// ErrTransactionMissingCert Transaction missing certificate or signature
+	ErrTransactionMissingCert = errors.New("Transaction missing certificate or signature.")
+
+	// ErrInvalidTransactionSignature Invalid Transaction Signature
+	ErrInvalidTransactionSignature = errors.New("Invalid Transaction Signature.")
+
+	// ErrTransactionCertificate Missing Transaction Certificate
+	ErrTransactionCertificate = errors.New("Missing Transaction Certificate.")
+
+	// ErrTransactionSignature Missing Transaction Signature
+	ErrTransactionSignature = errors.New("Missing Transaction Signature.")
+
+	// ErrInvalidSignature Invalid Signature
+	ErrInvalidSignature = errors.New("Invalid Signature.")
+
+	// ErrInvalidKey Invalid key
+	ErrInvalidKey = errors.New("Invalid key.")
+
+	// ErrInvalidReference Invalid reference
+	ErrInvalidReference = errors.New("Invalid reference.")
+
+	// ErrNilArgument Nil argument
+	ErrNilArgument = errors.New("Nil argument.")
+
+	// ErrNotImplemented Not implemented
+	ErrNotImplemented = errors.New("Not implemented.")
+
+	// ErrKeyStoreAlreadyInitialized Keystore already initialized
+	ErrKeyStoreAlreadyInitialized = errors.New("Keystore already Initilized.")
+
+	// ErrEncrypt Encryption failed
+	ErrEncrypt = errors.New("Encryption failed.")
+
+	// ErrDecrypt Decryption failed
+	ErrDecrypt = errors.New("Decryption failed.")
+
+	// ErrDifferentChaincodeID ChaincodeIDs are different
+	ErrDifferentChaincodeID = errors.New("ChaincodeIDs are different.")
+
+	// ErrDifferrentConfidentialityProtocolVersion different confidentiality protocol versions
+	ErrDifferrentConfidentialityProtocolVersion = errors.New("Confidentiality protocol versions are different.")
+
+	// ErrInvalidConfidentialityLevel Invalid confidentiality level
+	ErrInvalidConfidentialityLevel = errors.New("Invalid confidentiality level")
+
+	// ErrInvalidConfidentialityProtocol Invalid confidentiality protocol
+	ErrInvalidConfidentialityProtocol = errors.New("Invalid confidentiality protocol")
+
+	// ErrInvalidTransactionType Invalid transaction type
+	ErrInvalidTransactionType = errors.New("Invalid transaction type")
+
+	// ErrInvalidProtocolVersion Invalid protocol version
+	ErrInvalidProtocolVersion = errors.New("Invalid protocol version")
+)
+
+// ErrToString converts an error to a string; a nil error yields "".
+func ErrToString(err error) string {
+	if err == nil {
+		return ""
+	}
+	return err.Error()
+}
diff --git a/core/crypto/utils/io.go b/core/crypto/utils/io.go
new file mode 100644
index 00000000000..f7c042f50b4
--- /dev/null
+++ b/core/crypto/utils/io.go
@@ -0,0 +1,112 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+ "encoding/base64"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// DirMissingOrEmpty checks if a directory is missing or empty
+func DirMissingOrEmpty(path string) (bool, error) {
+ dirExists, err := DirExists(path)
+ if err != nil {
+ return false, err
+ }
+ if !dirExists {
+ return true, nil
+ }
+
+ dirEmpty, err := DirEmpty(path)
+ if err != nil {
+ return false, err
+ }
+ if dirEmpty {
+ return true, nil
+ }
+ return false, nil
+}
+
+// DirExists checks if a directory exists
+func DirExists(path string) (bool, error) {
+ _, err := os.Stat(path)
+ if err == nil {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+// DirEmpty checks if a directory is empty
+func DirEmpty(path string) (bool, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+
+ _, err = f.Readdir(1)
+ if err == io.EOF {
+ return true, nil
+ }
+ return false, err
+}
+
+// FileMissing checks if a file is missing
+func FileMissing(path string, name string) (bool, error) {
+ _, err := os.Stat(filepath.Join(path, name))
+ if err != nil {
+ return true, err
+ }
+ return false, nil
+}
+
+// FilePathMissing returns true if the path is missing, false otherwise.
+func FilePathMissing(path string) (bool, error) {
+ _, err := os.Stat(path)
+ if err != nil {
+ return true, err
+ }
+ return false, nil
+}
+
+// DecodeBase64 decodes from Base64
+func DecodeBase64(in string) ([]byte, error) {
+ return base64.StdEncoding.DecodeString(in)
+}
+
+// EncodeBase64 encodes to Base64
+func EncodeBase64(in []byte) string {
+ return base64.StdEncoding.EncodeToString(in)
+}
+
+// IntArrayEquals checks if the arrays of ints are the same
+func IntArrayEquals(a []int, b []int) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i, v := range a {
+ if v != b[i] {
+ return false
+ }
+ }
+ return true
+}
diff --git a/core/crypto/utils/slice.go b/core/crypto/utils/slice.go
new file mode 100644
index 00000000000..59e5374c386
--- /dev/null
+++ b/core/crypto/utils/slice.go
@@ -0,0 +1,25 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+// Clone clones the passed slice
+func Clone(src []byte) []byte {
+ clone := make([]byte, len(src))
+ copy(clone, src)
+
+ return clone
+}
diff --git a/core/crypto/validator.go b/core/crypto/validator.go
new file mode 100644
index 00000000000..d8b74a1e605
--- /dev/null
+++ b/core/crypto/validator.go
@@ -0,0 +1,160 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "sync"
+
+ "github.com/hyperledger/fabric/core/crypto/utils"
+)
+
+// Private type and variables
+
+type validatorEntry struct {
+ validator Peer
+ counter int64
+}
+
+var (
+ // Map of initialized validators
+ validators = make(map[string]validatorEntry)
+
+ // Sync
+ mutex sync.Mutex
+)
+
+// Public Methods
+
+// RegisterValidator registers a validator to the PKI infrastructure
+func RegisterValidator(name string, pwd []byte, enrollID, enrollPWD string) error {
+ mutex.Lock()
+ defer mutex.Unlock()
+
+ log.Infof("Registering validator [%s] with name [%s]...", enrollID, name)
+
+ if _, ok := validators[name]; ok {
+ log.Infof("Registering validator [%s] with name [%s]...done. Already initialized.", enrollID, name)
+
+ return nil
+ }
+
+ validator := newValidator()
+ if err := validator.register(name, pwd, enrollID, enrollPWD, nil); err != nil {
+ if err != utils.ErrAlreadyRegistered && err != utils.ErrAlreadyInitialized {
+ log.Errorf("Failed registering validator [%s] with name [%s] [%s].", enrollID, name, err)
+ return err
+ }
+ log.Infof("Registering validator [%s] with name [%s]...done. Already registered or initiliazed.", enrollID, name)
+ }
+ err := validator.close()
+ if err != nil {
+ // It is not necessary to report this error to the caller
+ log.Warningf("Registering validator [%s] with name [%s]. Failed closing [%s].", enrollID, name, err)
+ }
+
+ log.Infof("Registering validator [%s] with name [%s]...done!", enrollID, name)
+
+ return nil
+}
+
+// InitValidator initializes a validator named name with password pwd
+func InitValidator(name string, pwd []byte) (Peer, error) {
+ mutex.Lock()
+ defer mutex.Unlock()
+
+ log.Infof("Initializing validator [%s]...", name)
+
+ if entry, ok := validators[name]; ok {
+ log.Infof("Validator already initiliazied [%s]. Increasing counter from [%d]", name, validators[name].counter)
+ entry.counter++
+ validators[name] = entry
+
+ return validators[name].validator, nil
+ }
+
+ validator := newValidator()
+ if err := validator.init(name, pwd, nil); err != nil {
+ log.Errorf("Failed validator initialization [%s]: [%s]", name, err)
+
+ return nil, err
+ }
+
+ validators[name] = validatorEntry{validator, 1}
+ log.Infof("Initializing validator [%s]...done!", name)
+
+ return validator, nil
+}
+
+// CloseValidator releases all the resources allocated by the validator
+func CloseValidator(peer Peer) error {
+ mutex.Lock()
+ defer mutex.Unlock()
+
+ return closeValidatorInternal(peer, false)
+}
+
+// CloseAllValidators closes all the validators initialized so far
+func CloseAllValidators() (bool, []error) {
+ mutex.Lock()
+ defer mutex.Unlock()
+
+ log.Info("Closing all validators...")
+
+ errs := make([]error, len(validators))
+ for _, value := range validators {
+ err := closeValidatorInternal(value.validator, true)
+
+ errs = append(errs, err)
+ }
+
+ log.Info("Closing all validators...done!")
+
+ return len(errs) != 0, errs
+}
+
+// Private Methods
+
+func newValidator() *validatorImpl {
+ return &validatorImpl{&peerImpl{&nodeImpl{}, sync.RWMutex{}, nil}, nil}
+}
+
+func closeValidatorInternal(peer Peer, force bool) error {
+ if peer == nil {
+ return utils.ErrNilArgument
+ }
+
+ name := peer.GetName()
+ log.Infof("Closing validator [%s]...", name)
+ entry, ok := validators[name]
+ if !ok {
+ return utils.ErrInvalidReference
+ }
+ if entry.counter == 1 || force {
+ defer delete(validators, name)
+ err := validators[name].validator.(*validatorImpl).close()
+ log.Infof("Closing validator [%s]...done! [%s].", name, utils.ErrToString(err))
+
+ return err
+ }
+
+ // decrease counter
+ entry.counter--
+ validators[name] = entry
+ log.Infof("Closing validator [%s]...decreased counter at [%d].", name, validators[name].counter)
+
+ return nil
+}
diff --git a/core/crypto/validator_confidentiality.go b/core/crypto/validator_confidentiality.go
new file mode 100644
index 00000000000..80506e03d07
--- /dev/null
+++ b/core/crypto/validator_confidentiality.go
@@ -0,0 +1,135 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "encoding/asn1"
+ "errors"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+ obc "github.com/hyperledger/fabric/protos"
+)
+
+func (validator *validatorImpl) deepCloneTransaction(tx *obc.Transaction) (*obc.Transaction, error) {
+ raw, err := proto.Marshal(tx)
+ if err != nil {
+ validator.Errorf("Failed cloning transaction [%s].", err.Error())
+
+ return nil, err
+ }
+
+ clone := &obc.Transaction{}
+ err = proto.Unmarshal(raw, clone)
+ if err != nil {
+ validator.Errorf("Failed cloning transaction [%s].", err.Error())
+
+ return nil, err
+ }
+
+ return clone, nil
+}
+
+func (validator *validatorImpl) deepCloneAndDecryptTx(tx *obc.Transaction) (*obc.Transaction, error) {
+ switch tx.ConfidentialityProtocolVersion {
+ case "1.2":
+ return validator.deepCloneAndDecryptTx1_2(tx)
+ }
+ return nil, utils.ErrInvalidProtocolVersion
+}
+
+func (validator *validatorImpl) deepCloneAndDecryptTx1_2(tx *obc.Transaction) (*obc.Transaction, error) {
+ if tx.Nonce == nil || len(tx.Nonce) == 0 {
+ return nil, errors.New("Failed decrypting payload. Invalid nonce.")
+ }
+
+ // clone tx
+ clone, err := validator.deepCloneTransaction(tx)
+ if err != nil {
+ validator.Errorf("Failed deep cloning [%s].", err.Error())
+ return nil, err
+ }
+
+ var ccPrivateKey primitives.PrivateKey
+
+ validator.Debugf("Transaction type [%s].", tx.Type.String())
+
+ validator.Debug("Extract transaction key...")
+
+ // Derive transaction key
+ cipher, err := validator.eciesSPI.NewAsymmetricCipherFromPrivateKey(validator.chainPrivateKey)
+ if err != nil {
+ validator.Errorf("Failed init decryption engine [%s].", err.Error())
+ return nil, err
+ }
+
+ msgToValidatorsRaw, err := cipher.Process(tx.ToValidators)
+ if err != nil {
+ validator.Errorf("Failed decrypting message to validators [% x]: [%s].", tx.ToValidators, err.Error())
+ return nil, err
+ }
+
+ msgToValidators := new(chainCodeValidatorMessage1_2)
+ _, err = asn1.Unmarshal(msgToValidatorsRaw, msgToValidators)
+ if err != nil {
+ validator.Errorf("Failed unmarshalling message to validators [%s].", err.Error())
+ return nil, err
+ }
+
+ validator.Debugf("Deserializing transaction key [% x].", msgToValidators.PrivateKey)
+ ccPrivateKey, err = validator.eciesSPI.DeserializePrivateKey(msgToValidators.PrivateKey)
+ if err != nil {
+ validator.Errorf("Failed deserializing transaction key [%s].", err.Error())
+ return nil, err
+ }
+
+ validator.Debug("Extract transaction key...done")
+
+ cipher, err = validator.eciesSPI.NewAsymmetricCipherFromPrivateKey(ccPrivateKey)
+ if err != nil {
+ validator.Errorf("Failed init transaction decryption engine [%s].", err.Error())
+ return nil, err
+ }
+ // Decrypt Payload
+ payload, err := cipher.Process(clone.Payload)
+ if err != nil {
+ validator.Errorf("Failed decrypting payload [%s].", err.Error())
+ return nil, err
+ }
+ clone.Payload = payload
+
+ // Decrypt ChaincodeID
+ chaincodeID, err := cipher.Process(clone.ChaincodeID)
+ if err != nil {
+ validator.Errorf("Failed decrypting chaincode [%s].", err.Error())
+ return nil, err
+ }
+ clone.ChaincodeID = chaincodeID
+
+ // Decrypt metadata
+ if len(clone.Metadata) != 0 {
+ metadata, err := cipher.Process(clone.Metadata)
+ if err != nil {
+ validator.Errorf("Failed decrypting metadata [%s].", err.Error())
+ return nil, err
+ }
+ clone.Metadata = metadata
+ }
+
+ return clone, nil
+}
diff --git a/core/crypto/validator_impl.go b/core/crypto/validator_impl.go
new file mode 100644
index 00000000000..403e05c5492
--- /dev/null
+++ b/core/crypto/validator_impl.go
@@ -0,0 +1,174 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "crypto/ecdsa"
+
+ "fmt"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+ obc "github.com/hyperledger/fabric/protos"
+)
+
+// Public Struct
+
+type validatorImpl struct {
+ *peerImpl
+
+ // Chain
+ chainPrivateKey primitives.PrivateKey
+}
+
+// TransactionPreValidation verifies that the transaction is
+// well formed with the respect to the security layer
+// prescriptions (i.e. signature verification).
+func (validator *validatorImpl) TransactionPreValidation(tx *obc.Transaction) (*obc.Transaction, error) {
+ if !validator.isInitialized {
+ return nil, utils.ErrNotInitialized
+ }
+
+ return validator.peerImpl.TransactionPreValidation(tx)
+}
+
+// TransactionPreExecution verifies that the transaction is
+// well formed with the respect to the security layer
+// prescriptions (i.e. signature verification). If this is the case,
+// the method prepares the transaction to be executed.
+func (validator *validatorImpl) TransactionPreExecution(tx *obc.Transaction) (*obc.Transaction, error) {
+ if !validator.isInitialized {
+ return nil, utils.ErrNotInitialized
+ }
+
+ // validator.debug("Pre executing [%s].", tx.String())
+ validator.Debugf("Tx confdential level [%s].", tx.ConfidentialityLevel.String())
+
+ switch tx.ConfidentialityLevel {
+ case obc.ConfidentialityLevel_PUBLIC:
+ // Nothing to do here!
+
+ return tx, nil
+ case obc.ConfidentialityLevel_CONFIDENTIAL:
+ validator.Debug("Clone and Decrypt.")
+
+ // Clone the transaction and decrypt it
+ newTx, err := validator.deepCloneAndDecryptTx(tx)
+ if err != nil {
+ validator.Errorf("Failed decrypting [%s].", err.Error())
+
+ return nil, err
+ }
+
+ return newTx, nil
+ default:
+ return nil, utils.ErrInvalidConfidentialityLevel
+ }
+}
+
+// Sign signs msg with this validator's signing key and outputs
+// the signature if no error occurred.
+func (validator *validatorImpl) Sign(msg []byte) ([]byte, error) {
+ return validator.signWithEnrollmentKey(msg)
+}
+
+// Verify checks that signature if a valid signature of message under vkID's verification key.
+// If the verification succeeded, Verify returns nil meaning no error occurred.
+// If vkID is nil, then the signature is verified against this validator's verification key.
+func (validator *validatorImpl) Verify(vkID, signature, message []byte) error {
+ if len(vkID) == 0 {
+ return fmt.Errorf("Invalid peer id. It is empty.")
+ }
+ if len(signature) == 0 {
+ return fmt.Errorf("Invalid signature. It is empty.")
+ }
+ if len(message) == 0 {
+ return fmt.Errorf("Invalid message. It is empty.")
+ }
+
+ cert, err := validator.getEnrollmentCert(vkID)
+ if err != nil {
+ validator.Errorf("Failed getting enrollment cert for [% x]: [%s]", vkID, err)
+
+ return err
+ }
+
+ vk := cert.PublicKey.(*ecdsa.PublicKey)
+
+ ok, err := validator.verify(vk, message, signature)
+ if err != nil {
+ validator.Errorf("Failed verifying signature for [% x]: [%s]", vkID, err)
+
+ return err
+ }
+
+ if !ok {
+ validator.Errorf("Failed invalid signature for [% x]", vkID)
+
+ return utils.ErrInvalidSignature
+ }
+
+ return nil
+}
+
+// Private Methods
+
+func (validator *validatorImpl) register(id string, pwd []byte, enrollID, enrollPWD string, regFunc registerFunc) error {
+ // Register node
+ if err := validator.peerImpl.register(NodeValidator, id, pwd, enrollID, enrollPWD, nil); err != nil {
+ validator.Errorf("Failed registering [%s]: [%s]", enrollID, err)
+ return err
+ }
+
+ return nil
+}
+
+func (validator *validatorImpl) init(name string, pwd []byte, regFunc registerFunc) error {
+
+ validatorInitFunc := func(eType NodeType, name string, pwd []byte) error {
+ // Init crypto engine
+ err := validator.initCryptoEngine()
+ if err != nil {
+ validator.Errorf("Failed initiliazing crypto engine [%s].", err.Error())
+ return err
+ }
+
+ return nil
+ }
+
+ if err := validator.peerImpl.init(NodeValidator, name, pwd, validatorInitFunc); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (validator *validatorImpl) initCryptoEngine() (err error) {
+ // Init chain publicKey
+ validator.chainPrivateKey, err = validator.eciesSPI.NewPrivateKey(
+ nil, validator.enrollChainKey.(*ecdsa.PrivateKey),
+ )
+ if err != nil {
+ return
+ }
+
+ return
+}
+
+func (validator *validatorImpl) close() error {
+ return validator.peerImpl.close()
+}
diff --git a/core/crypto/validator_state.go b/core/crypto/validator_state.go
new file mode 100644
index 00000000000..d8f169a87f9
--- /dev/null
+++ b/core/crypto/validator_state.go
@@ -0,0 +1,320 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "errors"
+ "reflect"
+
+ "crypto/aes"
+ "crypto/cipher"
+ "encoding/asn1"
+ "encoding/binary"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/crypto/utils"
+ obc "github.com/hyperledger/fabric/protos"
+)
+
+func (validator *validatorImpl) GetStateEncryptor(deployTx, executeTx *obc.Transaction) (StateEncryptor, error) {
+ switch executeTx.ConfidentialityProtocolVersion {
+ case "1.2":
+ return validator.getStateEncryptor1_2(deployTx, executeTx)
+ }
+
+ return nil, utils.ErrInvalidConfidentialityLevel
+}
+
+func (validator *validatorImpl) getStateEncryptor1_2(deployTx, executeTx *obc.Transaction) (StateEncryptor, error) {
+ // Check nonce
+ if deployTx.Nonce == nil || len(deployTx.Nonce) == 0 {
+ return nil, errors.New("Invalid deploy nonce.")
+ }
+ if executeTx.Nonce == nil || len(executeTx.Nonce) == 0 {
+ return nil, errors.New("Invalid invoke nonce.")
+ }
+ // Check ChaincodeID
+ if deployTx.ChaincodeID == nil {
+ return nil, errors.New("Invalid deploy chaincodeID.")
+ }
+ if executeTx.ChaincodeID == nil {
+ return nil, errors.New("Invalid execute chaincodeID.")
+ }
+ // Check that deployTx and executeTx refers to the same chaincode
+ if !reflect.DeepEqual(deployTx.ChaincodeID, executeTx.ChaincodeID) {
+ return nil, utils.ErrDifferentChaincodeID
+ }
+ // Check the confidentiality protocol version
+ if deployTx.ConfidentialityProtocolVersion != executeTx.ConfidentialityProtocolVersion {
+ return nil, utils.ErrDifferrentConfidentialityProtocolVersion
+ }
+
+ validator.Debugf("Parsing transaction. Type [%s]. Confidentiality Protocol Version [%s]", executeTx.Type.String(), executeTx.ConfidentialityProtocolVersion)
+
+ deployStateKey, err := validator.getStateKeyFromTransaction(deployTx)
+
+ if executeTx.Type == obc.Transaction_CHAINCODE_QUERY {
+ validator.Debug("Parsing Query transaction...")
+
+ executeStateKey, err := validator.getStateKeyFromTransaction(executeTx)
+
+ // Compute deployTxKey key from the deploy transaction. This is used to decrypt the actual state
+ // of the chaincode
+ deployTxKey := primitives.HMAC(deployStateKey, deployTx.Nonce)
+
+ // Compute the key used to encrypt the result of the query
+ //queryKey := utils.HMACTruncated(executeStateKey, append([]byte{6}, executeTx.Nonce...), utils.AESKeyLength)
+
+ // Init the state encryptor
+ se := queryStateEncryptor{}
+ err = se.init(validator.nodeImpl, executeStateKey, deployTxKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return &se, nil
+ }
+
+ // Compute deployTxKey key from the deploy transaction
+ deployTxKey := primitives.HMAC(deployStateKey, deployTx.Nonce)
+
+ // Mask executeTx.Nonce
+ executeTxNonce := primitives.HMACTruncated(deployTxKey, primitives.Hash(executeTx.Nonce), primitives.NonceSize)
+
+ // Compute stateKey to encrypt the states and nonceStateKey to generates IVs. This
+ // allows validators to reach consensus
+ stateKey := primitives.HMACTruncated(deployTxKey, append([]byte{3}, executeTxNonce...), primitives.AESKeyLength)
+ nonceStateKey := primitives.HMAC(deployTxKey, append([]byte{4}, executeTxNonce...))
+
+ // Init the state encryptor
+ se := stateEncryptorImpl{}
+ err = se.init(validator.nodeImpl, stateKey, nonceStateKey, deployTxKey, executeTxNonce)
+ if err != nil {
+ return nil, err
+ }
+
+ return &se, nil
+}
+
+func (validator *validatorImpl) getStateKeyFromTransaction(tx *obc.Transaction) ([]byte, error) {
+ cipher, err := validator.eciesSPI.NewAsymmetricCipherFromPrivateKey(validator.chainPrivateKey)
+ if err != nil {
+ validator.Errorf("Failed init decryption engine [%s].", err.Error())
+ return nil, err
+ }
+
+ msgToValidatorsRaw, err := cipher.Process(tx.ToValidators)
+ if err != nil {
+ validator.Errorf("Failed decrypting message to validators [% x]: [%s].", tx.ToValidators, err.Error())
+ return nil, err
+ }
+
+ msgToValidators := new(chainCodeValidatorMessage1_2)
+ _, err = asn1.Unmarshal(msgToValidatorsRaw, msgToValidators)
+ if err != nil {
+ validator.Errorf("Failed unmarshalling message to validators [% x]: [%s].", msgToValidators, err.Error())
+ return nil, err
+ }
+
+ return msgToValidators.StateKey, nil
+}
+
+type stateEncryptorImpl struct {
+ node *nodeImpl
+
+ deployTxKey []byte
+ invokeTxNonce []byte
+
+ stateKey []byte
+ nonceStateKey []byte
+
+ gcmEnc cipher.AEAD
+ nonceSize int
+
+ counter uint64
+}
+
+func (se *stateEncryptorImpl) init(node *nodeImpl, stateKey, nonceStateKey, deployTxKey, invokeTxNonce []byte) error {
+ // Init fields
+ se.counter = 0
+ se.node = node
+ se.stateKey = stateKey
+ se.nonceStateKey = nonceStateKey
+ se.deployTxKey = deployTxKey
+ se.invokeTxNonce = invokeTxNonce
+
+ // Init aes
+ c, err := aes.NewCipher(se.stateKey)
+ if err != nil {
+ return err
+ }
+
+ // Init gcm for encryption
+ se.gcmEnc, err = cipher.NewGCM(c)
+ if err != nil {
+ return err
+ }
+
+ // Init nonce size
+ se.nonceSize = se.gcmEnc.NonceSize()
+ return nil
+}
+
+func (se *stateEncryptorImpl) Encrypt(msg []byte) ([]byte, error) {
+ var b = make([]byte, 8)
+ binary.BigEndian.PutUint64(b, se.counter)
+
+ se.node.Debugf("Encrypting with counter [% x].", b)
+ // se.log.Infof("Encrypting with txNonce ", utils.EncodeBase64(se.txNonce))
+
+ nonce := primitives.HMACTruncated(se.nonceStateKey, b, se.nonceSize)
+
+ se.counter++
+
+ // Seal will append the output to the first argument; the usage
+ // here appends the ciphertext to the nonce. The final parameter
+ // is any additional data to be authenticated.
+ out := se.gcmEnc.Seal(nonce, nonce, msg, se.invokeTxNonce)
+
+ return append(se.invokeTxNonce, out...), nil
+}
+
+func (se *stateEncryptorImpl) Decrypt(raw []byte) ([]byte, error) {
+ if len(raw) == 0 {
+ // A nil ciphertext decrypts to nil
+ return nil, nil
+ }
+
+ if len(raw) <= primitives.NonceSize {
+ return nil, utils.ErrDecrypt
+ }
+
+ // raw consists of (txNonce, ct)
+ txNonce := raw[:primitives.NonceSize]
+ // se.log.Infof("Decrypting with txNonce ", utils.EncodeBase64(txNonce))
+ ct := raw[primitives.NonceSize:]
+
+ nonce := make([]byte, se.nonceSize)
+ copy(nonce, ct)
+
+ key := primitives.HMACTruncated(se.deployTxKey, append([]byte{3}, txNonce...), primitives.AESKeyLength)
+ // se.log.Infof("Decrypting with key ", utils.EncodeBase64(key))
+ c, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ gcm, err := cipher.NewGCM(c)
+ if err != nil {
+ return nil, err
+ }
+
+ se.nonceSize = se.gcmEnc.NonceSize()
+
+ out, err := gcm.Open(nil, nonce, ct[se.nonceSize:], txNonce)
+ if err != nil {
+ return nil, utils.ErrDecrypt
+ }
+ return out, nil
+}
+
+type queryStateEncryptor struct {
+ node *nodeImpl
+
+ deployTxKey []byte
+
+ gcmEnc cipher.AEAD
+ nonceSize int
+}
+
+func (se *queryStateEncryptor) init(node *nodeImpl, queryKey, deployTxKey []byte) error {
+ // Init fields
+ se.node = node
+ se.deployTxKey = deployTxKey
+
+ // se.log.Infof("QUERY Encrypting with key ", utils.EncodeBase64(queryKey))
+
+ // Init aes
+ c, err := aes.NewCipher(queryKey)
+ if err != nil {
+ return err
+ }
+
+ // Init gcm for encryption
+ se.gcmEnc, err = cipher.NewGCM(c)
+ if err != nil {
+ return err
+ }
+
+ // Init nonce size
+ se.nonceSize = se.gcmEnc.NonceSize()
+ return nil
+}
+
+func (se *queryStateEncryptor) Encrypt(msg []byte) ([]byte, error) {
+ nonce, err := primitives.GetRandomBytes(se.nonceSize)
+ if err != nil {
+ se.node.Errorf("Failed getting randomness [%s].", err.Error())
+ return nil, err
+ }
+
+ // Seal will append the output to the first argument; the usage
+ // here appends the ciphertext to the nonce. The final parameter
+ // is any additional data to be authenticated.
+ out := se.gcmEnc.Seal(nonce, nonce, msg, nil)
+
+ return out, nil
+}
+
+func (se *queryStateEncryptor) Decrypt(raw []byte) ([]byte, error) {
+ if len(raw) == 0 {
+ // A nil ciphertext decrypts to nil
+ return nil, nil
+ }
+
+ if len(raw) <= primitives.NonceSize {
+ return nil, utils.ErrDecrypt
+ }
+
+ // raw consists of (txNonce, ct)
+ txNonce := raw[:primitives.NonceSize]
+ // se.log.Infof("Decrypting with txNonce ", utils.EncodeBase64(txNonce))
+ ct := raw[primitives.NonceSize:]
+
+ nonce := make([]byte, se.nonceSize)
+ copy(nonce, ct)
+
+ key := primitives.HMACTruncated(se.deployTxKey, append([]byte{3}, txNonce...), primitives.AESKeyLength)
+ // se.log.Infof("Decrypting with key ", utils.EncodeBase64(key))
+ c, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ gcm, err := cipher.NewGCM(c)
+ if err != nil {
+ return nil, err
+ }
+
+ se.nonceSize = se.gcmEnc.NonceSize()
+
+ out, err := gcm.Open(nil, nonce, ct[se.nonceSize:], txNonce)
+ if err != nil {
+ return nil, utils.ErrDecrypt
+ }
+ return out, nil
+}
diff --git a/core/crypto/validator_validity_period.go b/core/crypto/validator_validity_period.go
new file mode 100644
index 00000000000..b934b9a5ba8
--- /dev/null
+++ b/core/crypto/validator_validity_period.go
@@ -0,0 +1,95 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crypto
+
+import (
+ "errors"
+ "strconv"
+ "time"
+
+ "github.com/spf13/viper"
+
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ "github.com/hyperledger/fabric/core/ledger"
+ obc "github.com/hyperledger/fabric/protos"
+)
+
+//We are temporarily disabling the validity period functionality
+var allowValidityPeriodVerification = false
+
+func validityPeriodVerificationEnabled() bool {
+ // If the verification of the validity period is enabled in the configuration file return the configured value
+ if viper.IsSet("peer.validator.validity-period.verification") {
+ return viper.GetBool("peer.validator.validity-period.verification")
+ }
+
+ // Validity period verification is enabled by default if no configuration was specified.
+ return true
+}
+
+func (validator *validatorImpl) verifyValidityPeriod(tx *obc.Transaction) (*obc.Transaction, error) {
+ if tx.Cert != nil && tx.Signature != nil {
+
+ // Unmarshal cert
+ cert, err := primitives.DERToX509Certificate(tx.Cert)
+ if err != nil {
+ validator.Errorf("verifyValidityPeriod: failed unmarshalling cert %s:", err)
+ return tx, err
+ }
+
+ cid := viper.GetString("pki.validity-period.chaincodeHash")
+
+ ledger, err := ledger.GetLedger()
+ if err != nil {
+ validator.Errorf("verifyValidityPeriod: failed getting access to the ledger %s:", err)
+ return tx, err
+ }
+
+ vpBytes, err := ledger.GetState(cid, "system.validity.period", true)
+ if err != nil {
+ validator.Errorf("verifyValidityPeriod: failed reading validity period from the ledger %s:", err)
+ return tx, err
+ }
+
+ i, err := strconv.ParseInt(string(vpBytes[:]), 10, 64)
+ if err != nil {
+ validator.Errorf("verifyValidityPeriod: failed to parse validity period %s:", err)
+ return tx, err
+ }
+
+ vp := time.Unix(i, 0)
+
+ var errMsg string
+
+ // Verify the validity period of the TCert
+ switch {
+ case cert.NotAfter.Before(cert.NotBefore):
+ errMsg = "verifyValidityPeriod: certificate validity period is invalid"
+ case vp.Before(cert.NotBefore):
+ errMsg = "verifyValidityPeriod: certificate validity period is in the future"
+ case vp.After(cert.NotAfter):
+ errMsg = "verifyValidityPeriod: certificate validity period is in the past"
+ }
+
+ if errMsg != "" {
+ validator.Error(errMsg)
+ return tx, errors.New(errMsg)
+ }
+ }
+
+ return tx, nil
+}
diff --git a/core/db/db.go b/core/db/db.go
new file mode 100644
index 00000000000..e4515b501e9
--- /dev/null
+++ b/core/db/db.go
@@ -0,0 +1,362 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package db
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strings"
+ "sync"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+ "github.com/tecbot/gorocksdb"
+)
+
+var dbLogger = logging.MustGetLogger("db")
+
+// Names of the rocksdb column families the peer partitions its data into.
+const blockchainCF = "blockchainCF"
+const stateCF = "stateCF"
+const stateDeltaCF = "stateDeltaCF"
+const indexesCF = "indexesCF"
+const persistCF = "persistCF"
+
+var columnfamilies = []string{
+	blockchainCF, // blocks of the block chain
+	stateCF,      // world state
+	stateDeltaCF, // open transaction state
+	indexesCF,    // tx uuid -> blockno
+	persistCF,    // persistent per-peer state (consensus)
+}
+
+// dbState tracks whether the singleton DB is currently open or closed.
+type dbState int32
+
+const (
+	closed dbState = iota
+	opened
+)
+
+// OpenchainDB encapsulates rocksdb's structures
+type OpenchainDB struct {
+	DB           *gorocksdb.DB
+	BlockchainCF *gorocksdb.ColumnFamilyHandle
+	StateCF      *gorocksdb.ColumnFamilyHandle
+	StateDeltaCF *gorocksdb.ColumnFamilyHandle
+	IndexesCF    *gorocksdb.ColumnFamilyHandle
+	PersistCF    *gorocksdb.ColumnFamilyHandle
+	dbState      dbState
+	mux          sync.Mutex // serializes Open/Close state transitions
+}
+
+// Package-level singleton handed out by GetDBHandle.
+var openchainDB = Create()
+
+// Create create an openchainDB instance
+func Create() *OpenchainDB {
+	return &OpenchainDB{dbState: closed}
+}
+
+// GetDBHandle get an opened openchainDB singleton
+func GetDBHandle() *OpenchainDB {
+	openchainDB.Open()
+	return openchainDB
+}
+
+// GetFromBlockchainCF get value for given key from column family - blockchainCF
+func (openchainDB *OpenchainDB) GetFromBlockchainCF(key []byte) ([]byte, error) {
+	return openchainDB.Get(openchainDB.BlockchainCF, key)
+}
+
+// GetFromBlockchainCFSnapshot get value for given key from column family in a DB snapshot - blockchainCF
+func (openchainDB *OpenchainDB) GetFromBlockchainCFSnapshot(snapshot *gorocksdb.Snapshot, key []byte) ([]byte, error) {
+	return openchainDB.getFromSnapshot(snapshot, openchainDB.BlockchainCF, key)
+}
+
+// GetFromStateCF get value for given key from column family - stateCF
+func (openchainDB *OpenchainDB) GetFromStateCF(key []byte) ([]byte, error) {
+	return openchainDB.Get(openchainDB.StateCF, key)
+}
+
+// GetFromStateDeltaCF get value for given key from column family - stateDeltaCF
+func (openchainDB *OpenchainDB) GetFromStateDeltaCF(key []byte) ([]byte, error) {
+	return openchainDB.Get(openchainDB.StateDeltaCF, key)
+}
+
+// GetFromIndexesCF get value for given key from column family - indexCF
+func (openchainDB *OpenchainDB) GetFromIndexesCF(key []byte) ([]byte, error) {
+	return openchainDB.Get(openchainDB.IndexesCF, key)
+}
+
+// GetBlockchainCFIterator get iterator for column family - blockchainCF.
+// Caller must Close() the returned iterator.
+func (openchainDB *OpenchainDB) GetBlockchainCFIterator() *gorocksdb.Iterator {
+	return openchainDB.GetIterator(openchainDB.BlockchainCF)
+}
+
+// GetStateCFIterator get iterator for column family - stateCF.
+// Caller must Close() the returned iterator.
+func (openchainDB *OpenchainDB) GetStateCFIterator() *gorocksdb.Iterator {
+	return openchainDB.GetIterator(openchainDB.StateCF)
+}
+
+// GetStateCFSnapshotIterator get iterator for column family - stateCF. This iterator
+// is based on a snapshot and should be used for long running scans, such as
+// reading the entire state. Remember to call iterator.Close() when you are done.
+func (openchainDB *OpenchainDB) GetStateCFSnapshotIterator(snapshot *gorocksdb.Snapshot) *gorocksdb.Iterator {
+	return openchainDB.getSnapshotIterator(snapshot, openchainDB.StateCF)
+}
+
+// GetStateDeltaCFIterator get iterator for column family - stateDeltaCF.
+// Caller must Close() the returned iterator.
+func (openchainDB *OpenchainDB) GetStateDeltaCFIterator() *gorocksdb.Iterator {
+	return openchainDB.GetIterator(openchainDB.StateDeltaCF)
+}
+
+// GetSnapshot returns a point-in-time view of the DB. You MUST call snapshot.Release()
+// when you are done with the snapshot.
+func (openchainDB *OpenchainDB) GetSnapshot() *gorocksdb.Snapshot {
+	return openchainDB.DB.NewSnapshot()
+}
+
+// getDBPath derives the rocksdb directory from the configured
+// 'peer.fileSystemPath', guaranteeing exactly one path separator before the
+// final "db" component. Panics when the property is unset.
+func getDBPath() string {
+	root := viper.GetString("peer.fileSystemPath")
+	if root == "" {
+		panic("DB path not specified in configuration file. Please check that property 'peer.fileSystemPath' is set")
+	}
+	if strings.HasSuffix(root, "/") {
+		return root + "db"
+	}
+	return root + "/db"
+}
+
+// Open open underlying rocksdb. Idempotent: a second call while the DB is
+// already open returns immediately. Panics (rather than returning an error)
+// on any failure, since the peer cannot run without its database.
+func (openchainDB *OpenchainDB) Open() {
+	openchainDB.mux.Lock()
+	if openchainDB.dbState == opened {
+		openchainDB.mux.Unlock()
+		return
+	}
+
+	defer openchainDB.mux.Unlock()
+
+	dbPath := getDBPath()
+	missing, err := dirMissingOrEmpty(dbPath)
+	if err != nil {
+		panic(fmt.Sprintf("Error while trying to open DB: %s", err))
+	}
+	dbLogger.Debugf("Is db path [%s] empty [%t]", dbPath, missing)
+
+	if missing {
+		err = os.MkdirAll(path.Dir(dbPath), 0755)
+		if err != nil {
+			panic(fmt.Sprintf("Error making directory path [%s]: %s", dbPath, err))
+		}
+	}
+
+	opts := gorocksdb.NewDefaultOptions()
+	defer opts.Destroy()
+
+	// Only allow rocksdb to create fresh files when the directory was absent
+	// or empty; opening a non-empty directory that is not a DB must fail.
+	opts.SetCreateIfMissing(missing)
+	opts.SetCreateIfMissingColumnFamilies(true)
+
+	// rocksdb always requires the implicit "default" column family first;
+	// the same options object is shared by every column family.
+	cfNames := []string{"default"}
+	cfNames = append(cfNames, columnfamilies...)
+	var cfOpts []*gorocksdb.Options
+	for range cfNames {
+		cfOpts = append(cfOpts, opts)
+	}
+
+	db, cfHandlers, err := gorocksdb.OpenDbColumnFamilies(opts, dbPath, cfNames, cfOpts)
+
+	if err != nil {
+		panic(fmt.Sprintf("Error opening DB: %s", err))
+	}
+
+	// cfHandlers[0] is the unused "default" family; the remaining handles
+	// follow the order of cfNames (and therefore of columnfamilies).
+	openchainDB.DB = db
+	openchainDB.BlockchainCF = cfHandlers[1]
+	openchainDB.StateCF = cfHandlers[2]
+	openchainDB.StateDeltaCF = cfHandlers[3]
+	openchainDB.IndexesCF = cfHandlers[4]
+	openchainDB.PersistCF = cfHandlers[5]
+	openchainDB.dbState = opened
+}
+
+// Close releases all column family handles and closes rocksdb.
+// Idempotent: closing an already-closed DB is a no-op.
+func (openchainDB *OpenchainDB) Close() {
+	openchainDB.mux.Lock()
+	if openchainDB.dbState == closed {
+		openchainDB.mux.Unlock()
+		return
+	}
+
+	defer openchainDB.mux.Unlock()
+	openchainDB.BlockchainCF.Destroy()
+	openchainDB.StateCF.Destroy()
+	openchainDB.StateDeltaCF.Destroy()
+	openchainDB.IndexesCF.Destroy()
+	openchainDB.PersistCF.Destroy()
+	openchainDB.DB.Close()
+	openchainDB.dbState = closed
+}
+
+// DeleteState deletes ALL state keys/values from the DB. This is generally
+// only used during state synchronization when creating a new state from
+// a snapshot. Implemented by dropping and re-creating the state and
+// state-delta column families, which replaces the handles stored on the
+// receiver.
+func (openchainDB *OpenchainDB) DeleteState() error {
+	err := openchainDB.DB.DropColumnFamily(openchainDB.StateCF)
+	if err != nil {
+		dbLogger.Errorf("Error dropping state CF: %s", err)
+		return err
+	}
+	err = openchainDB.DB.DropColumnFamily(openchainDB.StateDeltaCF)
+	if err != nil {
+		dbLogger.Errorf("Error dropping state delta CF: %s", err)
+		return err
+	}
+	opts := gorocksdb.NewDefaultOptions()
+	defer opts.Destroy()
+	openchainDB.StateCF, err = openchainDB.DB.CreateColumnFamily(opts, stateCF)
+	if err != nil {
+		dbLogger.Errorf("Error creating state CF: %s", err)
+		return err
+	}
+	openchainDB.StateDeltaCF, err = openchainDB.DB.CreateColumnFamily(opts, stateDeltaCF)
+	if err != nil {
+		dbLogger.Errorf("Error creating state delta CF: %s", err)
+		return err
+	}
+	return nil
+}
+
+// Get returns the value for the given column family and key, or (nil, nil)
+// when the key is absent. The returned slice is an independent copy that is
+// safe to retain after the underlying rocksdb slice is freed.
+func (openchainDB *OpenchainDB) Get(cfHandler *gorocksdb.ColumnFamilyHandle, key []byte) ([]byte, error) {
+	opt := gorocksdb.NewDefaultReadOptions()
+	defer opt.Destroy()
+	slice, err := openchainDB.DB.GetCF(opt, cfHandler, key)
+	if err != nil {
+		// Log through the package logger (including the error itself) rather
+		// than printing to stdout, consistent with the rest of this file.
+		dbLogger.Errorf("Error while trying to retrieve key [%s]: %s", key, err)
+		return nil, err
+	}
+	defer slice.Free()
+	if slice.Data() == nil {
+		return nil, nil
+	}
+	data := makeCopy(slice.Data())
+	return data, nil
+}
+
+// Put saves the key/value in the given column family.
+func (openchainDB *OpenchainDB) Put(cfHandler *gorocksdb.ColumnFamilyHandle, key []byte, value []byte) error {
+	opt := gorocksdb.NewDefaultWriteOptions()
+	defer opt.Destroy()
+	err := openchainDB.DB.PutCF(opt, cfHandler, key, value)
+	if err != nil {
+		// Log through the package logger (including the error itself) rather
+		// than printing to stdout, consistent with the rest of this file.
+		dbLogger.Errorf("Error while trying to write key [%s]: %s", key, err)
+		return err
+	}
+	return nil
+}
+
+// Delete deletes the given key in the specified column family.
+func (openchainDB *OpenchainDB) Delete(cfHandler *gorocksdb.ColumnFamilyHandle, key []byte) error {
+	opt := gorocksdb.NewDefaultWriteOptions()
+	defer opt.Destroy()
+	err := openchainDB.DB.DeleteCF(opt, cfHandler, key)
+	if err != nil {
+		// Log through the package logger (including the error itself) rather
+		// than printing to stdout, consistent with the rest of this file.
+		dbLogger.Errorf("Error while trying to delete key [%s]: %s", key, err)
+		return err
+	}
+	return nil
+}
+
+// getFromSnapshot reads the value for key from the given column family as it
+// existed at the time of the snapshot. Returns a copy of the data; a missing
+// key yields a nil slice and a nil error.
+func (openchainDB *OpenchainDB) getFromSnapshot(snapshot *gorocksdb.Snapshot, cfHandler *gorocksdb.ColumnFamilyHandle, key []byte) ([]byte, error) {
+	opt := gorocksdb.NewDefaultReadOptions()
+	defer opt.Destroy()
+	opt.SetSnapshot(snapshot)
+	slice, err := openchainDB.DB.GetCF(opt, cfHandler, key)
+	if err != nil {
+		// Log through the package logger (including the error itself) rather
+		// than printing to stdout, consistent with the rest of this file.
+		dbLogger.Errorf("Error while trying to retrieve key [%s]: %s", key, err)
+		return nil, err
+	}
+	defer slice.Free()
+	// append keeps nil-for-missing semantics (unlike makeCopy, which would
+	// return an empty non-nil slice).
+	data := append([]byte(nil), slice.Data()...)
+	return data, nil
+}
+
+// GetIterator returns an iterator for the given column family over the
+// current (latest) data. Caller must Close() the iterator.
+// NOTE(review): opt is destroyed when this function returns while the
+// iterator is still live — confirm against gorocksdb's ReadOptions/iterator
+// lifetime rules that this is safe.
+func (openchainDB *OpenchainDB) GetIterator(cfHandler *gorocksdb.ColumnFamilyHandle) *gorocksdb.Iterator {
+	opt := gorocksdb.NewDefaultReadOptions()
+	opt.SetFillCache(true)
+	defer opt.Destroy()
+	return openchainDB.DB.NewIteratorCF(opt, cfHandler)
+}
+
+// getSnapshotIterator returns an iterator over the column family pinned to
+// the given snapshot. Caller must Close() the iterator.
+func (openchainDB *OpenchainDB) getSnapshotIterator(snapshot *gorocksdb.Snapshot, cfHandler *gorocksdb.ColumnFamilyHandle) *gorocksdb.Iterator {
+	opt := gorocksdb.NewDefaultReadOptions()
+	defer opt.Destroy()
+	opt.SetSnapshot(snapshot)
+	iter := openchainDB.DB.NewIteratorCF(opt, cfHandler)
+	return iter
+}
+
+// dirMissingOrEmpty reports whether path is absent or an empty directory.
+func dirMissingOrEmpty(path string) (bool, error) {
+	exists, err := dirExists(path)
+	if err != nil {
+		return false, err
+	}
+	if !exists {
+		return true, nil
+	}
+	empty, err := dirEmpty(path)
+	if err != nil {
+		return false, err
+	}
+	return empty, nil
+}
+
+// dirExists reports whether a file or directory is present at path.
+func dirExists(path string) (bool, error) {
+	if _, err := os.Stat(path); err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+// dirEmpty reports whether the directory at path contains no entries.
+func dirEmpty(path string) (bool, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	// Reading a single entry is enough: EOF means the directory is empty.
+	if _, err = f.Readdir(1); err == io.EOF {
+		return true, nil
+	}
+	return false, err
+}
+
+// makeCopy returns an independent copy of src.
+func makeCopy(src []byte) []byte {
+	return append(make([]byte, 0, len(src)), src...)
+}
diff --git a/core/db/db_test.go b/core/db/db_test.go
new file mode 100644
index 00000000000..8e6cfa15db7
--- /dev/null
+++ b/core/db/db_test.go
@@ -0,0 +1,315 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package db
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/spf13/viper"
+ "github.com/tecbot/gorocksdb"
+)
+
+// TestMain points the DB at a throw-away temp directory before any test runs.
+func TestMain(m *testing.M) {
+	setupTestConfig()
+	os.Exit(m.Run())
+}
+
+// TestGetDBPathEmptyPath verifies that opening the DB with an empty
+// 'peer.fileSystemPath' panics.
+func TestGetDBPathEmptyPath(t *testing.T) {
+	originalSetting := viper.GetString("peer.fileSystemPath")
+	viper.Set("peer.fileSystemPath", "")
+	// Deferred funcs run LIFO: the setting is restored first, then the
+	// recover check runs (recover still observes the panic at that point).
+	defer func() {
+		x := recover()
+		if x == nil {
+			t.Fatal("A panic should have been caused here.")
+		}
+	}()
+	defer viper.Set("peer.fileSystemPath", originalSetting)
+	GetDBHandle()
+}
+
+// TestCreateDB verifies a fresh DB instance can be created and opened.
+func TestCreateDB(t *testing.T) {
+	openchainDB := Create()
+	openchainDB.Open()
+	defer deleteTestDBPath()
+	defer openchainDB.Close()
+}
+
+// TestOpenDB_DirDoesNotExist verifies Open creates the DB directory when it
+// does not exist yet (no panic expected).
+func TestOpenDB_DirDoesNotExist(t *testing.T) {
+	openchainDB := Create()
+	deleteTestDBPath()
+
+	defer deleteTestDBPath()
+	defer openchainDB.Close()
+	defer func() {
+		if r := recover(); r != nil {
+			t.Fatalf("Failed to open DB: %s", r)
+		}
+	}()
+	openchainDB.Open()
+}
+
+// TestOpenDB_NonEmptyDirExists verifies Open panics when pointed at a
+// non-empty directory that is not a rocksdb database (create-if-missing is
+// disabled in that case).
+func TestOpenDB_NonEmptyDirExists(t *testing.T) {
+	openchainDB := Create()
+	deleteTestDBPath()
+	createNonEmptyTestDBPath()
+
+	defer deleteTestDBPath()
+	defer openchainDB.Close()
+	defer func() {
+		if r := recover(); r == nil {
+			t.Fatalf("dbPath is already exists. DB open should throw error")
+		}
+	}()
+	openchainDB.Open()
+}
+
+// TestWriteAndRead round-trips one key/value through every column family.
+func TestWriteAndRead(t *testing.T) {
+	openchainDB := GetDBHandle()
+	defer deleteTestDBPath()
+	defer openchainDB.Close()
+	performBasicReadWrite(openchainDB, t)
+}
+
+// This test verifies that when a new column family is added to the DB
+// users at an older level of the DB will still be able to open it with new code
+func TestDBColumnUpgrade(t *testing.T) {
+	openchainDB := GetDBHandle()
+	openchainDB.Close()
+
+	// Simulate an upgraded binary by prepending an extra column family,
+	// then reopen the on-disk DB that was created without it.
+	oldcfs := columnfamilies
+	columnfamilies = append([]string{"Testing"}, columnfamilies...)
+	defer func() {
+		columnfamilies = oldcfs
+	}()
+	openchainDB = GetDBHandle()
+
+	defer deleteTestDBPath()
+	defer openchainDB.Close()
+	// NOTE(review): this recover check is registered AFTER the GetDBHandle
+	// call it is meant to guard, so a panic during reopen escapes it and
+	// fails the test as a raw panic instead — confirm this is intentional.
+	defer func() {
+		if r := recover(); r != nil {
+			t.Fatalf("Error re-opening DB with upgraded columnFamilies")
+		}
+	}()
+}
+
+// TestDeleteState verifies DeleteState wipes both the state and state-delta
+// column families.
+func TestDeleteState(t *testing.T) {
+	testDBWrapper := NewTestDBWrapper()
+	testDBWrapper.CleanDB(t)
+	openchainDB := GetDBHandle()
+	defer testDBWrapper.cleanup()
+	openchainDB.Put(openchainDB.StateCF, []byte("key1"), []byte("value1"))
+	openchainDB.Put(openchainDB.StateDeltaCF, []byte("key2"), []byte("value2"))
+	openchainDB.DeleteState()
+	value1, err := openchainDB.GetFromStateCF([]byte("key1"))
+	if err != nil {
+		t.Fatalf("Error getting in value: %s", err)
+	}
+	if value1 != nil {
+		t.Fatalf("A nil value expected. Found [%s]", value1)
+	}
+
+	value2, err := openchainDB.GetFromStateCF([]byte("key2"))
+	if err != nil {
+		t.Fatalf("Error getting in value: %s", err)
+	}
+	if value2 != nil {
+		t.Fatalf("A nil value expected. Found [%s]", value2)
+	}
+}
+
+// TestDBSnapshot verifies that a snapshot keeps serving the values that were
+// current when it was taken, while reads against the DB see later mutations.
+func TestDBSnapshot(t *testing.T) {
+	testDBWrapper := NewTestDBWrapper()
+	testDBWrapper.CleanDB(t)
+	openchainDB := GetDBHandle()
+	defer testDBWrapper.cleanup()
+
+	// write key-values
+	openchainDB.Put(openchainDB.BlockchainCF, []byte("key1"), []byte("value1"))
+	openchainDB.Put(openchainDB.BlockchainCF, []byte("key2"), []byte("value2"))
+
+	// create a snapshot
+	snapshot := openchainDB.GetSnapshot()
+
+	// add/delete/modify key-values
+	openchainDB.Delete(openchainDB.BlockchainCF, []byte("key1"))
+	openchainDB.Put(openchainDB.BlockchainCF, []byte("key2"), []byte("value2_new"))
+	openchainDB.Put(openchainDB.BlockchainCF, []byte("key3"), []byte("value3"))
+
+	// test key-values from latest data in db
+	v1, _ := openchainDB.GetFromBlockchainCF([]byte("key1"))
+	v2, _ := openchainDB.GetFromBlockchainCF([]byte("key2"))
+	v3, _ := openchainDB.GetFromBlockchainCF([]byte("key3"))
+	if !bytes.Equal(v1, nil) {
+		t.Fatalf("Expected value from db is 'nil', found [%s]", v1)
+	}
+	if !bytes.Equal(v2, []byte("value2_new")) {
+		t.Fatalf("Expected value from db [%s], found [%s]", "value2_new", v2)
+	}
+	if !bytes.Equal(v3, []byte("value3")) {
+		t.Fatalf("Expected value from db [%s], found [%s]", "value3", v3)
+	}
+
+	// test key-values from snapshot
+	v1, _ = openchainDB.GetFromBlockchainCFSnapshot(snapshot, []byte("key1"))
+	v2, _ = openchainDB.GetFromBlockchainCFSnapshot(snapshot, []byte("key2"))
+	v3, err := openchainDB.GetFromBlockchainCFSnapshot(snapshot, []byte("key3"))
+	if err != nil {
+		t.Fatalf("Error: %s", err)
+	}
+
+	if !bytes.Equal(v1, []byte("value1")) {
+		t.Fatalf("Expected value from db snapshot [%s], found [%s]", "value1", v1)
+	}
+
+	// Fix: the failure message previously printed "value1" as the expected
+	// value for key2, but the comparison is against "value2".
+	if !bytes.Equal(v2, []byte("value2")) {
+		t.Fatalf("Expected value from db snapshot [%s], found [%s]", "value2", v2)
+	}
+
+	if !bytes.Equal(v3, nil) {
+		t.Fatalf("Expected value from db snapshot is 'nil', found [%s]", v3)
+	}
+}
+
+// TestDBIteratorAndSnapshotIterator verifies that a snapshot iterator sees
+// pre-snapshot state while plain iterators see the latest data, across
+// several column families. Each `defer itr.Close()` captures the iterator
+// value at defer time, so every iterator is closed exactly once.
+func TestDBIteratorAndSnapshotIterator(t *testing.T) {
+	testDBWrapper := NewTestDBWrapper()
+	testDBWrapper.CleanDB(t)
+	openchainDB := GetDBHandle()
+	defer testDBWrapper.cleanup()
+
+	// write key-values
+	openchainDB.Put(openchainDB.StateCF, []byte("key1"), []byte("value1"))
+	openchainDB.Put(openchainDB.StateCF, []byte("key2"), []byte("value2"))
+
+	// create a snapshot
+	snapshot := openchainDB.GetSnapshot()
+
+	// add/delete/modify key-values
+	openchainDB.Delete(openchainDB.StateCF, []byte("key1"))
+	openchainDB.Put(openchainDB.StateCF, []byte("key2"), []byte("value2_new"))
+	openchainDB.Put(openchainDB.StateCF, []byte("key3"), []byte("value3"))
+
+	// test snapshot iterator
+	itr := openchainDB.GetStateCFSnapshotIterator(snapshot)
+	defer itr.Close()
+	testIterator(t, itr, map[string][]byte{"key1": []byte("value1"), "key2": []byte("value2")})
+
+	// test iterator over latest data in stateCF
+	itr = openchainDB.GetStateCFIterator()
+	defer itr.Close()
+	testIterator(t, itr, map[string][]byte{"key2": []byte("value2_new"), "key3": []byte("value3")})
+
+	openchainDB.Put(openchainDB.StateDeltaCF, []byte("key4"), []byte("value4"))
+	openchainDB.Put(openchainDB.StateDeltaCF, []byte("key5"), []byte("value5"))
+	itr = openchainDB.GetStateDeltaCFIterator()
+	defer itr.Close()
+	testIterator(t, itr, map[string][]byte{"key4": []byte("value4"), "key5": []byte("value5")})
+
+	openchainDB.Put(openchainDB.BlockchainCF, []byte("key6"), []byte("value6"))
+	openchainDB.Put(openchainDB.BlockchainCF, []byte("key7"), []byte("value7"))
+	itr = openchainDB.GetBlockchainCFIterator()
+	defer itr.Close()
+	testIterator(t, itr, map[string][]byte{"key6": []byte("value6"), "key7": []byte("value7")})
+}
+
+// db helper functions
+func testIterator(t *testing.T, itr *gorocksdb.Iterator, expectedValues map[string][]byte) {
+ itrResults := make(map[string][]byte)
+ itr.SeekToFirst()
+ for ; itr.Valid(); itr.Next() {
+ key := itr.Key()
+ value := itr.Value()
+ k := makeCopy(key.Data())
+ v := makeCopy(value.Data())
+ itrResults[string(k)] = v
+ }
+ if len(itrResults) != len(expectedValues) {
+ t.Fatalf("Expected [%d] results from iterator, found [%d]", len(expectedValues), len(itrResults))
+ }
+ for k, v := range expectedValues {
+ if !bytes.Equal(itrResults[k], v) {
+ t.Fatalf("Wrong value for key [%s]. Expected [%s], found [%s]", k, itrResults[k], v)
+ }
+ }
+}
+
+// createNonEmptyTestDBPath creates a non-empty directory at the DB path so
+// tests can exercise the "existing non-DB directory" failure mode of Open.
+func createNonEmptyTestDBPath() {
+	dbPath := viper.GetString("peer.fileSystemPath")
+	os.MkdirAll(dbPath+"/db/tmpFile", 0775)
+}
+
+// deleteTestDBPath removes the whole test filesystem root, including the DB.
+func deleteTestDBPath() {
+	dbPath := viper.GetString("peer.fileSystemPath")
+	os.RemoveAll(dbPath)
+}
+
+// setupTestConfig points 'peer.fileSystemPath' at a fresh temp directory and
+// clears any leftover DB state. Panics if the temp dir cannot be created.
+func setupTestConfig() {
+	tempDir, err := ioutil.TempDir("", "fabric-db-test")
+	if err != nil {
+		panic(err)
+	}
+	viper.Set("peer.fileSystemPath", tempDir)
+	deleteTestDBPath()
+}
+
+// performBasicReadWrite writes one key/value into each column family via a
+// single write batch, then reads every value back through the CF-specific
+// accessor and checks it round-trips byte-for-byte.
+func performBasicReadWrite(openchainDB *OpenchainDB, t *testing.T) {
+	opt := gorocksdb.NewDefaultWriteOptions()
+	defer opt.Destroy()
+	writeBatch := gorocksdb.NewWriteBatch()
+	defer writeBatch.Destroy()
+	writeBatch.PutCF(openchainDB.BlockchainCF, []byte("dummyKey"), []byte("dummyValue"))
+	writeBatch.PutCF(openchainDB.StateCF, []byte("dummyKey1"), []byte("dummyValue1"))
+	writeBatch.PutCF(openchainDB.StateDeltaCF, []byte("dummyKey2"), []byte("dummyValue2"))
+	writeBatch.PutCF(openchainDB.IndexesCF, []byte("dummyKey3"), []byte("dummyValue3"))
+	if err := openchainDB.DB.Write(opt, writeBatch); err != nil {
+		t.Fatalf("Error while writing to db: %s", err)
+	}
+
+	// Read each key back through the accessor for the CF it was written to.
+	checks := []struct {
+		get      func([]byte) ([]byte, error)
+		key      string
+		expected string
+	}{
+		{openchainDB.GetFromBlockchainCF, "dummyKey", "dummyValue"},
+		{openchainDB.GetFromStateCF, "dummyKey1", "dummyValue1"},
+		{openchainDB.GetFromStateDeltaCF, "dummyKey2", "dummyValue2"},
+		{openchainDB.GetFromIndexesCF, "dummyKey3", "dummyValue3"},
+	}
+	for _, c := range checks {
+		value, err := c.get([]byte(c.key))
+		if err != nil {
+			t.Fatalf("read error = [%s]", err)
+		}
+		if !bytes.Equal(value, []byte(c.expected)) {
+			t.Fatalf("read error. Bytes not equal. Expected [%s], found [%s]", c.expected, value)
+		}
+	}
+}
diff --git a/core/db/db_test_exports.go b/core/db/db_test_exports.go
new file mode 100644
index 00000000000..cce96172546
--- /dev/null
+++ b/core/db/db_test_exports.go
@@ -0,0 +1,138 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package db
+
+import (
+ "os"
+ "testing"
+
+ "github.com/spf13/viper"
+ "github.com/tecbot/gorocksdb"
+)
+
+// TestDBWrapper wraps the db. Can be used by other modules for testing
+type TestDBWrapper struct {
+	performCleanup bool // set once a test DB exists, so cleanup() closes it at most once
+}
+
+// NewTestDBWrapper constructs a new TestDBWrapper
+func NewTestDBWrapper() *TestDBWrapper {
+	return &TestDBWrapper{}
+}
+
+///////////////////////////
+// Test db creation and cleanup functions
+
+// CleanDB This method closes existing db, remove the db dir.
+// Can be called before starting a test so that data from other tests does not interfere
+func (testDB *TestDBWrapper) CleanDB(t testing.TB) {
+	// cleaning up test db here so that each test does not have to call it explicitly
+	// at the end of the test
+	testDB.cleanup()
+	testDB.removeDBPath()
+	t.Logf("Creating testDB")
+
+	testDB.performCleanup = true
+}
+
+// CreateFreshDBGinkgo creates a fresh database for ginkgo testing.
+// Identical to CleanDB except that it takes no testing.TB (ginkgo specs
+// have none to pass).
+func (testDB *TestDBWrapper) CreateFreshDBGinkgo() {
+	// cleaning up test db here so that each test does not have to call it explicitly
+	// at the end of the test
+	testDB.cleanup()
+	testDB.removeDBPath()
+	testDB.performCleanup = true
+}
+
+// cleanup closes the singleton DB, but only if this wrapper previously
+// created one (guarded by performCleanup).
+func (testDB *TestDBWrapper) cleanup() {
+	if testDB.performCleanup {
+		GetDBHandle().Close()
+		testDB.performCleanup = false
+	}
+}
+
+// removeDBPath deletes the configured filesystem root, including the DB dir.
+func (testDB *TestDBWrapper) removeDBPath() {
+	dbPath := viper.GetString("peer.fileSystemPath")
+	os.RemoveAll(dbPath)
+}
+
+// WriteToDB tests can use this method for persisting a given batch to db
+func (testDB *TestDBWrapper) WriteToDB(t testing.TB, writeBatch *gorocksdb.WriteBatch) {
+	opt := gorocksdb.NewDefaultWriteOptions()
+	defer opt.Destroy()
+	err := GetDBHandle().DB.Write(opt, writeBatch)
+	if err != nil {
+		t.Fatalf("Error while writing to db. Error:%s", err)
+	}
+}
+
+// GetFromDB gets the value for the given key from default column-family.
+// Fails the test on a read error; the returned slice is an independent copy.
+func (testDB *TestDBWrapper) GetFromDB(t testing.TB, key []byte) []byte {
+	db := GetDBHandle().DB
+	opt := gorocksdb.NewDefaultReadOptions()
+	defer opt.Destroy()
+	slice, err := db.Get(opt, key)
+	// Fix: check the error BEFORE deferring slice.Free(). Previously the
+	// Free was deferred first, so a failed Get (which may return a nil
+	// slice) would dereference nil inside the deferred Free.
+	if err != nil {
+		t.Fatalf("Error while getting key-value from DB: %s", err)
+	}
+	defer slice.Free()
+	value := append([]byte(nil), slice.Data()...)
+	return value
+}
+
+// GetFromStateCF tests can use this method for getting value from StateCF column-family
+func (testDB *TestDBWrapper) GetFromStateCF(t testing.TB, key []byte) []byte {
+	openchainDB := GetDBHandle()
+	value, err := openchainDB.GetFromStateCF(key)
+	if err != nil {
+		t.Fatalf("Error while getting from db. Error:%s", err)
+	}
+	return value
+}
+
+// GetFromStateDeltaCF tests can use this method for getting value from StateDeltaCF column-family
+func (testDB *TestDBWrapper) GetFromStateDeltaCF(t testing.TB, key []byte) []byte {
+	openchainDB := GetDBHandle()
+	value, err := openchainDB.GetFromStateDeltaCF(key)
+	if err != nil {
+		t.Fatalf("Error while getting from db. Error:%s", err)
+	}
+	return value
+}
+
+// CloseDB closes the db
+func (testDB *TestDBWrapper) CloseDB(t testing.TB) {
+	openchainDB := GetDBHandle()
+	openchainDB.Close()
+}
+
+// GetEstimatedNumKeys returns estimated number of key-values in db. This is not accurate in all the cases
+func (testDB *TestDBWrapper) GetEstimatedNumKeys(t testing.TB) map[string]string {
+	openchainDB := GetDBHandle()
+	result := make(map[string]string, 5)
+	// rocksdb's "estimate-num-keys" property, queried per column family.
+	result["stateCF"] = openchainDB.DB.GetPropertyCF("rocksdb.estimate-num-keys", openchainDB.StateCF)
+	result["stateDeltaCF"] = openchainDB.DB.GetPropertyCF("rocksdb.estimate-num-keys", openchainDB.StateDeltaCF)
+	result["blockchainCF"] = openchainDB.DB.GetPropertyCF("rocksdb.estimate-num-keys", openchainDB.BlockchainCF)
+	result["indexCF"] = openchainDB.DB.GetPropertyCF("rocksdb.estimate-num-keys", openchainDB.IndexesCF)
+	return result
+}
+
+// GetDBStats returns statistics for the database
+func (testDB *TestDBWrapper) GetDBStats() string {
+	openchainDB := GetDBHandle()
+	return openchainDB.DB.GetProperty("rocksdb.stats")
+}
diff --git a/core/devops.go b/core/devops.go
new file mode 100644
index 00000000000..4fd3d4c54fd
--- /dev/null
+++ b/core/devops.go
@@ -0,0 +1,465 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+ "golang.org/x/net/context"
+
+ "encoding/asn1"
+ "encoding/base64"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/chaincode"
+ "github.com/hyperledger/fabric/core/chaincode/platforms"
+ "github.com/hyperledger/fabric/core/container"
+ crypto "github.com/hyperledger/fabric/core/crypto"
+ "github.com/hyperledger/fabric/core/peer"
+ "github.com/hyperledger/fabric/core/util"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+var devopsLogger = logging.MustGetLogger("devops")
+
+// NewDevopsServer creates and returns a new Devops server instance.
+// The security flag is snapshotted from the "security.enabled" viper config
+// at construction time, and an empty binding map is allocated.
+func NewDevopsServer(coord peer.MessageHandlerCoordinator) *Devops {
+	d := new(Devops)
+	d.coord = coord
+	d.isSecurityEnabled = viper.GetBool("security.enabled")
+	d.bindingMap = &bindingMap{m: make(map[string]crypto.TransactionHandler)}
+	return d
+}
+
+// bindingMap Used to store map of binding to TransactionHandler.
+// Keys are base64-encoded bindings (see getKeyFromBinding); access is
+// guarded by the embedded RWMutex.
+type bindingMap struct {
+	sync.RWMutex
+	m map[string]crypto.TransactionHandler
+}
+
+// Devops implementation of Devops services
+type Devops struct {
+	coord             peer.MessageHandlerCoordinator // executes transactions on the peer
+	isSecurityEnabled bool                           // snapshot of "security.enabled" config
+	bindingMap        *bindingMap                    // binding -> TransactionHandler (EXP_* flows)
+}
+
+// getKeyFromBinding derives the map key for a binding: its base64 (std) encoding.
+func (b *bindingMap) getKeyFromBinding(binding []byte) string {
+	return base64.StdEncoding.EncodeToString(binding)
+}
+
+// addBinding stores txHandler under the given binding, replacing any
+// previous handler for the same binding. Safe for concurrent use.
+func (b *bindingMap) addBinding(bindingToAdd []byte, txHandler crypto.TransactionHandler) {
+	b.Lock()
+	defer b.Unlock()
+	key := b.getKeyFromBinding(bindingToAdd)
+	b.m[key] = txHandler
+}
+
+// getTxHandlerForBinding looks up the TransactionHandler previously stored
+// for the given binding via addBinding. Returns an error when no handler is
+// mapped for the binding's key.
+func (b *bindingMap) getTxHandlerForBinding(binding []byte) (crypto.TransactionHandler, error) {
+	// Read-only lookup: a read lock suffices and allows concurrent readers.
+	b.RLock()
+	defer b.RUnlock()
+	key := b.getKeyFromBinding(binding)
+	txHandler, ok := b.m[key]
+	if !ok {
+		// TXhandler not found by key, return error
+		return nil, fmt.Errorf("Transaction handler not found for binding key = %s", key)
+	}
+	return txHandler, nil
+}
+
+// Login establishes the security context with the Devops service.
+// A registration failure is reported as a FAILURE Response (nil error),
+// not as a Go error.
+func (d *Devops) Login(ctx context.Context, secret *pb.Secret) (*pb.Response, error) {
+	if err := crypto.RegisterClient(secret.EnrollId, nil, secret.EnrollId, secret.EnrollSecret); nil != err {
+		return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(err.Error())}, nil
+	}
+	return &pb.Response{Status: pb.Response_SUCCESS}, nil
+
+	// TODO: Handle timeout and expiration (note: this comment sits after the return)
+}
+
+// Build builds the supplied chaincode image.
+// In dev mode (chaincode.mode == DevModeUserRunsChaincode) no container is
+// built and CodePackage is left nil; otherwise the spec is validated and a
+// chaincode container is built via the container VM.
+func (*Devops) Build(context context.Context, spec *pb.ChaincodeSpec) (*pb.ChaincodeDeploymentSpec, error) {
+	mode := viper.GetString("chaincode.mode")
+	var codePackageBytes []byte
+	if mode != chaincode.DevModeUserRunsChaincode {
+		devopsLogger.Debugf("Received build request for chaincode spec: %v", spec)
+		if err := CheckSpec(spec); err != nil {
+			return nil, err
+		}
+
+		vm, err := container.NewVM()
+		if err != nil {
+			return nil, fmt.Errorf("Error getting vm")
+		}
+
+		codePackageBytes, err = vm.BuildChaincodeContainer(spec)
+		if err != nil {
+			err = fmt.Errorf("Error getting chaincode package bytes: %s", err)
+			devopsLogger.Error(fmt.Sprintf("%s", err))
+			return nil, err
+		}
+	}
+	chaincodeDeploymentSpec := &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec, CodePackage: codePackageBytes}
+	return chaincodeDeploymentSpec, nil
+}
+
+// getChaincodeBytes validates the spec and packages the chaincode source into
+// a deployment spec. Unlike Build, it does not build a container image; it
+// only gathers the code package bytes. In dev mode the package is left nil.
+func (*Devops) getChaincodeBytes(context context.Context, spec *pb.ChaincodeSpec) (*pb.ChaincodeDeploymentSpec, error) {
+	mode := viper.GetString("chaincode.mode")
+	var codePackageBytes []byte
+	if mode != chaincode.DevModeUserRunsChaincode {
+		devopsLogger.Debugf("Received build request for chaincode spec: %v", spec)
+		var err error
+		if err = CheckSpec(spec); err != nil {
+			return nil, err
+		}
+
+		codePackageBytes, err = container.GetChaincodePackageBytes(spec)
+		if err != nil {
+			err = fmt.Errorf("Error getting chaincode package bytes: %s", err)
+			devopsLogger.Error(fmt.Sprintf("%s", err))
+			return nil, err
+		}
+	}
+	chaincodeDeploymentSpec := &pb.ChaincodeDeploymentSpec{ChaincodeSpec: spec, CodePackage: codePackageBytes}
+	return chaincodeDeploymentSpec, nil
+}
+
+// Deploy deploys the supplied chaincode image to the validators through a transaction.
+// It builds the deployment spec, wraps it in a (secured, when security is
+// enabled) deploy transaction, and submits it to the peer coordinator.
+func (d *Devops) Deploy(ctx context.Context, spec *pb.ChaincodeSpec) (*pb.ChaincodeDeploymentSpec, error) {
+	// get the deployment spec
+	chaincodeDeploymentSpec, err := d.getChaincodeBytes(ctx, spec)
+
+	if err != nil {
+		devopsLogger.Error(fmt.Sprintf("Error deploying chaincode spec: %v\n\n error: %s", spec, err))
+		return nil, err
+	}
+
+	// Now create the Transactions message and send to Peer.
+
+	// The chaincode name doubles as the deploy transaction ID.
+	transID := chaincodeDeploymentSpec.ChaincodeSpec.ChaincodeID.Name
+
+	var tx *pb.Transaction
+	var sec crypto.Client
+
+	if peer.SecurityEnabled() {
+		if devopsLogger.IsEnabledFor(logging.DEBUG) {
+			devopsLogger.Debugf("Initializing secure devops using context %s", spec.SecureContext)
+		}
+		sec, err = crypto.InitClient(spec.SecureContext, nil)
+		defer crypto.CloseClient(sec)
+
+		// remove the security context since we no longer need it downstream
+		spec.SecureContext = ""
+
+		if nil != err {
+			return nil, err
+		}
+
+		if devopsLogger.IsEnabledFor(logging.DEBUG) {
+			devopsLogger.Debugf("Creating secure transaction %s", transID)
+		}
+		tx, err = sec.NewChaincodeDeployTransaction(chaincodeDeploymentSpec, transID, spec.Attributes...)
+		if nil != err {
+			return nil, err
+		}
+	} else {
+		if devopsLogger.IsEnabledFor(logging.DEBUG) {
+			devopsLogger.Debugf("Creating deployment transaction (%s)", transID)
+		}
+		tx, err = pb.NewChaincodeDeployTransaction(chaincodeDeploymentSpec, transID)
+		if err != nil {
+			return nil, fmt.Errorf("Error deploying chaincode: %s ", err)
+		}
+	}
+
+	if devopsLogger.IsEnabledFor(logging.DEBUG) {
+		devopsLogger.Debugf("Sending deploy transaction (%s) to validator", tx.Uuid)
+	}
+	resp := d.coord.ExecuteTransaction(tx)
+	if resp.Status == pb.Response_FAILURE {
+		// errors.New rather than fmt.Errorf: resp.Msg is not a format string
+		// and any '%' verbs inside it would be mangled by Errorf.
+		err = errors.New(string(resp.Msg))
+	}
+
+	return chaincodeDeploymentSpec, err
+}
+
+// invokeOrQuery is the shared implementation behind Invoke and Query.
+// It generates the transaction ID (optionally with a custom ID-generation
+// algorithm), builds a (secured, when security is enabled) transaction and
+// executes it on the coordinator. For queries under privacy, the result is
+// decrypted before being returned.
+func (d *Devops) invokeOrQuery(ctx context.Context, chaincodeInvocationSpec *pb.ChaincodeInvocationSpec, attributes []string, invoke bool) (*pb.Response, error) {
+
+	if chaincodeInvocationSpec.ChaincodeSpec.ChaincodeID.Name == "" {
+		return nil, fmt.Errorf("name not given for invoke/query")
+	}
+
+	// Now create the Transactions message and send to Peer.
+	var customIDgenAlg = strings.ToLower(chaincodeInvocationSpec.IdGenerationAlg)
+	var id string
+	var generr error
+	if customIDgenAlg != "" {
+		// Custom algorithm hashes the first constructor argument into the ID.
+		id, generr = util.GenerateIDWithAlg(customIDgenAlg, chaincodeInvocationSpec.ChaincodeSpec.CtorMsg.Args[0])
+		if generr != nil {
+			return nil, generr
+		}
+	} else {
+		id = util.GenerateUUID()
+	}
+	devopsLogger.Infof("Transaction ID: %v", id)
+	var transaction *pb.Transaction
+	var err error
+	var sec crypto.Client
+	if peer.SecurityEnabled() {
+		if devopsLogger.IsEnabledFor(logging.DEBUG) {
+			devopsLogger.Debugf("Initializing secure devops using context %s", chaincodeInvocationSpec.ChaincodeSpec.SecureContext)
+		}
+		sec, err = crypto.InitClient(chaincodeInvocationSpec.ChaincodeSpec.SecureContext, nil)
+		defer crypto.CloseClient(sec)
+		// remove the security context since we no longer need it downstream
+		chaincodeInvocationSpec.ChaincodeSpec.SecureContext = ""
+		if nil != err {
+			return nil, err
+		}
+	}
+
+	transaction, err = d.createExecTx(chaincodeInvocationSpec, attributes, id, invoke, sec)
+	if err != nil {
+		return nil, err
+	}
+	if devopsLogger.IsEnabledFor(logging.DEBUG) {
+		devopsLogger.Debugf("Sending invocation transaction (%s) to validator", transaction.Uuid)
+	}
+	resp := d.coord.ExecuteTransaction(transaction)
+	if resp.Status == pb.Response_FAILURE {
+		// errors.New rather than fmt.Errorf: resp.Msg is not a format string
+		// and any '%' verbs inside it would be mangled by Errorf.
+		err = errors.New(string(resp.Msg))
+	} else {
+		if !invoke && nil != sec && viper.GetBool("security.privacy") {
+			if resp.Msg, err = sec.DecryptQueryResult(transaction, resp.Msg); nil != err {
+				devopsLogger.Errorf("Failed decrypting query transaction result %s", string(resp.Msg[:]))
+				//resp = &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(err.Error())}
+			}
+		}
+	}
+	return resp, err
+}
+
+// createExecTx builds an invoke or query transaction for the given spec.
+// When a crypto client is supplied the transaction is created through the
+// secure API (with the given attributes); otherwise a plain transaction of
+// the appropriate type is created.
+func (d *Devops) createExecTx(spec *pb.ChaincodeInvocationSpec, attributes []string, uuid string, invokeTx bool, sec crypto.Client) (*pb.Transaction, error) {
+	var tx *pb.Transaction
+	var err error
+
+	//TODO What should we do with the attributes
+	if nil != sec {
+		if devopsLogger.IsEnabledFor(logging.DEBUG) {
+			devopsLogger.Debugf("Creating secure invocation transaction %s", uuid)
+		}
+		if invokeTx {
+			tx, err = sec.NewChaincodeExecute(spec, uuid, attributes...)
+		} else {
+			tx, err = sec.NewChaincodeQuery(spec, uuid, attributes...)
+		}
+		if nil != err {
+			return nil, err
+		}
+	} else {
+		if devopsLogger.IsEnabledFor(logging.DEBUG) {
+			devopsLogger.Debugf("Creating invocation transaction (%s)", uuid)
+		}
+		var t pb.Transaction_Type
+		if invokeTx {
+			t = pb.Transaction_CHAINCODE_INVOKE
+		} else {
+			t = pb.Transaction_CHAINCODE_QUERY
+		}
+		tx, err = pb.NewChaincodeExecute(spec, uuid, t)
+		if nil != err {
+			return nil, err
+		}
+	}
+	return tx, nil
+}
+
+// Invoke performs the supplied invocation on the specified chaincode through a transaction.
+// Thin wrapper over invokeOrQuery with invoke=true.
+func (d *Devops) Invoke(ctx context.Context, chaincodeInvocationSpec *pb.ChaincodeInvocationSpec) (*pb.Response, error) {
+	return d.invokeOrQuery(ctx, chaincodeInvocationSpec, chaincodeInvocationSpec.ChaincodeSpec.Attributes, true)
+}
+
+// Query performs the supplied query on the specified chaincode through a transaction.
+// Thin wrapper over invokeOrQuery with invoke=false.
+func (d *Devops) Query(ctx context.Context, chaincodeInvocationSpec *pb.ChaincodeInvocationSpec) (*pb.Response, error) {
+	return d.invokeOrQuery(ctx, chaincodeInvocationSpec, chaincodeInvocationSpec.ChaincodeSpec.Attributes, false)
+}
+
+// CheckSpec to see if chaincode resides within current package capture for language.
+// It rejects a nil spec, resolves the platform for spec.Type, and delegates
+// the actual validation to the platform implementation.
+func CheckSpec(spec *pb.ChaincodeSpec) error {
+	// Don't allow nil value
+	if spec == nil {
+		return errors.New("Expected chaincode specification, nil received")
+	}
+
+	platform, err := platforms.Find(spec.Type)
+	if err != nil {
+		return fmt.Errorf("Failed to determine platform type: %s", err)
+	}
+
+	return platform.ValidateSpec(spec)
+}
+
+// EXP_GetApplicationTCert retrieves an application TCert for the supplied user.
+// On success the DER-encoded certificate is returned in Response.Msg. When
+// security is disabled, a FAILURE response is returned (nil error).
+func (d *Devops) EXP_GetApplicationTCert(ctx context.Context, secret *pb.Secret) (*pb.Response, error) {
+	var sec crypto.Client
+	var err error
+
+	if d.isSecurityEnabled {
+		if devopsLogger.IsEnabledFor(logging.DEBUG) {
+			// Debugf (not Debug): the message is a format string.
+			devopsLogger.Debugf("Initializing secure devops using context %s", secret.EnrollId)
+		}
+		sec, err = crypto.InitClient(secret.EnrollId, nil)
+		defer crypto.CloseClient(sec)
+
+		if nil != err {
+			return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(err.Error())}, nil
+		}
+
+		devopsLogger.Debugf("Getting TCert for id: %s", secret.EnrollId)
+		tcertHandler, err := sec.GetTCertificateHandlerNext()
+		if nil != err {
+			return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(err.Error())}, nil
+		}
+		certDER := tcertHandler.GetCertificate()
+		return &pb.Response{Status: pb.Response_SUCCESS, Msg: certDER}, nil
+	}
+	devopsLogger.Warning("Security NOT enabled")
+	return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte("Security NOT enabled")}, nil
+	// TODO: Handle timeout and expiration
+}
+
+// EXP_PrepareForTx prepares a binding/TXHandler pair to be used in subsequent TX.
+// The binding is returned in Response.Msg and can later be presented to
+// EXP_ExecuteWithBinding, which resolves it back to the stored handler.
+func (d *Devops) EXP_PrepareForTx(ctx context.Context, secret *pb.Secret) (*pb.Response, error) {
+	var sec crypto.Client
+	var err error
+	var txHandler crypto.TransactionHandler
+	var binding []byte
+
+	if d.isSecurityEnabled {
+		if devopsLogger.IsEnabledFor(logging.DEBUG) {
+			// Debugf (not Debug): the message is a format string.
+			devopsLogger.Debugf("Initializing secure devops using context %s", secret.EnrollId)
+		}
+		sec, err = crypto.InitClient(secret.EnrollId, nil)
+		defer crypto.CloseClient(sec)
+
+		if nil != err {
+			return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(err.Error())}, nil
+		}
+
+		devopsLogger.Debugf("Getting TXHandler for id: %s", secret.EnrollId)
+		tcertHandler, err := sec.GetTCertificateHandlerNext()
+		if nil != err {
+			return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(err.Error())}, nil
+		}
+		txHandler, err = tcertHandler.GetTransactionHandler()
+		if nil != err {
+			// Previously this error was silently overwritten by GetBinding below.
+			return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(err.Error())}, nil
+		}
+		binding, err = txHandler.GetBinding()
+		if nil != err {
+			return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(err.Error())}, nil
+		}
+		// Now add to binding map
+		d.bindingMap.addBinding(binding, txHandler)
+		return &pb.Response{Status: pb.Response_SUCCESS, Msg: binding}, nil
+	}
+	devopsLogger.Warning("Security NOT enabled")
+	return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte("Security NOT enabled")}, nil
+	// TODO: Handle timeout and expiration
+}
+
+// EXP_ProduceSigma produces a sigma as []byte and returns in response.
+// The supplied application TCert (DER) is used to sign tcert||data; the
+// response Msg is a marshaled SigmaOutput carrying the tcert, the signature
+// and an ASN.1 encoding of both.
+func (d *Devops) EXP_ProduceSigma(ctx context.Context, sigmaInput *pb.SigmaInput) (*pb.Response, error) {
+	var sec crypto.Client
+	var err error
+	var sigma []byte
+	secret := sigmaInput.Secret
+
+	// Local ASN.1 envelope for (certificate, signature); the struct name is
+	// not part of the encoding.
+	type RBACMetadata struct {
+		Cert  []byte
+		Sigma []byte
+	}
+
+	if d.isSecurityEnabled {
+		if devopsLogger.IsEnabledFor(logging.DEBUG) {
+			// Debugf (not Debug): the message is a format string.
+			devopsLogger.Debugf("Initializing secure devops using context %s", secret.EnrollId)
+		}
+		sec, err = crypto.InitClient(secret.EnrollId, nil)
+		defer crypto.CloseClient(sec)
+
+		if nil != err {
+			return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(err.Error())}, nil
+		}
+
+		devopsLogger.Debugf("Getting TCertHandler for id: %s, from DER = %s", secret.EnrollId, sigmaInput.AppTCert)
+		tcertHandler, err := sec.GetTCertificateHandlerFromDER(sigmaInput.AppTCert)
+		//tcertHandler, err := sec.GetTCertificateHandlerNext()
+		if nil != err {
+			return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(fmt.Errorf("Error getting TCertHandler from DER: %s", err).Error())}, nil
+		}
+		tcert := sigmaInput.AppTCert //tcertHandler.GetCertificate()
+		sigma, err = tcertHandler.Sign(append(tcert, sigmaInput.Data...))
+		if nil != err {
+			return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(fmt.Errorf("Error signing with TCertHandler from DER: %s", err).Error())}, nil
+		}
+		// Produce the SigmaOutput
+		asn1Encoding, err := asn1.Marshal(RBACMetadata{Cert: tcert, Sigma: sigma})
+		if nil != err {
+			return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(err.Error())}, nil
+		}
+		sigmaOutput := &pb.SigmaOutput{Tcert: tcert, Sigma: sigma, Asn1Encoding: asn1Encoding}
+		sigmaOutputBytes, err := proto.Marshal(sigmaOutput)
+		if nil != err {
+			return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(err.Error())}, nil
+		}
+		return &pb.Response{Status: pb.Response_SUCCESS, Msg: sigmaOutputBytes}, nil
+	}
+	devopsLogger.Warning("Security NOT enabled")
+	return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte("Security NOT enabled")}, nil
+
+}
+
+// EXP_ExecuteWithBinding executes a transaction with a specific binding/TXHandler.
+// The binding must have been produced earlier by EXP_PrepareForTx on this
+// same Devops instance (the handler is looked up in the in-memory bindingMap).
+func (d *Devops) EXP_ExecuteWithBinding(ctx context.Context, executeWithBinding *pb.ExecuteWithBinding) (*pb.Response, error) {
+
+	if d.isSecurityEnabled {
+		devopsLogger.Debug("Getting TxHandler for binding")
+
+		txHandler, err := d.bindingMap.getTxHandlerForBinding(executeWithBinding.Binding)
+
+		if nil != err {
+			return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(err.Error())}, nil
+		}
+
+		tid := util.GenerateUUID()
+
+		tx, err := txHandler.NewChaincodeExecute(executeWithBinding.ChaincodeInvocationSpec, tid)
+		if err != nil {
+			return nil, fmt.Errorf("Error creating executing with binding: %s", err)
+		}
+
+		return d.coord.ExecuteTransaction(tx), nil
+		//return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte("NOT IMPLEMENTED")}, nil
+
+		//return &pb.Response{Status: pb.Response_SUCCESS, Msg: sigmaOutputBytes}, nil
+	}
+	devopsLogger.Warning("Security NOT enabled")
+	return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte("Security NOT enabled")}, nil
+}
diff --git a/core/devops_test.go b/core/devops_test.go
new file mode 100644
index 00000000000..026b6a70d43
--- /dev/null
+++ b/core/devops_test.go
@@ -0,0 +1,94 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+ "testing"
+
+ "golang.org/x/net/context"
+
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// TestDevops_Build_NilSpec verifies Build rejects a nil chaincode spec.
+// Currently skipped pending a proper Validator test harness.
+func TestDevops_Build_NilSpec(t *testing.T) {
+	t.Skip("Skipping until we have the Validator system setup properly for testing.")
+	// TODO Cannot pass in nil to NewDevopsServer
+	devopsServer := NewDevopsServer(nil)
+
+	_, err := devopsServer.Build(context.Background(), nil)
+	if err == nil {
+		t.Fail()
+		t.Log("Expected error in Devops.Build call with 'nil' spec:")
+	}
+	t.Logf("Got expected err: %s", err)
+	//performHandshake(t, peerClientConn)
+}
+
+// TestDevops_Build verifies a Go chaincode spec builds successfully.
+// Currently skipped pending a proper Validator test harness.
+func TestDevops_Build(t *testing.T) {
+	t.Skip("Skipping until we have the Validator system setup properly for testing.")
+	// TODO Cannot pass in nil to NewDevopsServer
+	devopsServer := NewDevopsServer(nil)
+
+	// Build the spec
+	chaincodePath := "github.com/hyperledger/fabric/core/example/chaincode/chaincode_example01"
+	spec := &pb.ChaincodeSpec{Type: pb.ChaincodeSpec_GOLANG, ChaincodeID: &pb.ChaincodeID{Path: chaincodePath}}
+
+	buildResult, err := devopsServer.Build(context.Background(), spec)
+	if err != nil {
+		t.Fail()
+		t.Logf("Error in Devops.Build call: %s", err)
+	}
+	t.Logf("Build result = %s", buildResult.ChaincodeSpec.ChaincodeID)
+	//performHandshake(t, peerClientConn)
+}
+
+// TestDevops_Deploy verifies a Go chaincode spec deploys successfully.
+// Currently skipped pending a proper Validator test harness.
+func TestDevops_Deploy(t *testing.T) {
+	t.Skip("Skipping until we have the Validator system setup properly for testing.")
+	// TODO Cannot pass in nil to NewDevopsServer
+	devopsServer := NewDevopsServer(nil)
+
+	// Build the spec
+	chaincodePath := "github.com/hyperledger/fabric/core/example/chaincode/chaincode_example01"
+	spec := &pb.ChaincodeSpec{Type: pb.ChaincodeSpec_GOLANG, ChaincodeID: &pb.ChaincodeID{Path: chaincodePath}}
+
+	buildResult, err := devopsServer.Deploy(context.Background(), spec)
+	if err != nil {
+		t.Fail()
+		// Fixed copy-paste: this test exercises Deploy, not Build.
+		t.Logf("Error in Devops.Deploy call: %s", err)
+	}
+	t.Logf("Deploy result = %s", buildResult.ChaincodeSpec)
+	//performHandshake(t, peerClientConn)
+}
+
+// TestDevops_Spec_NoVersion expects Deploy to fail for a spec without a version.
+// Currently skipped pending a proper Validator test harness.
+func TestDevops_Spec_NoVersion(t *testing.T) {
+	t.Skip("Skipping until we have the Validator system setup properly for testing.")
+	// TODO Cannot pass in nil to NewDevopsServer
+	devopsServer := NewDevopsServer(nil)
+
+	// Build the spec
+	chaincodePath := "github.com/hyperledger/fabric/core/example/chaincode/chaincode_example01"
+	spec := &pb.ChaincodeSpec{Type: pb.ChaincodeSpec_GOLANG, ChaincodeID: &pb.ChaincodeID{Path: chaincodePath}}
+
+	buildResult, err := devopsServer.Deploy(context.Background(), spec)
+	if err == nil {
+		t.Fail()
+		t.Log("Expected error with no version specified")
+		return
+	}
+	t.Logf("Deploy result = %s, err = %s", buildResult, err)
+	//performHandshake(t, peerClientConn)
+}
diff --git a/core/discovery/discovery.go b/core/discovery/discovery.go
new file mode 100644
index 00000000000..e6fb106fdc0
--- /dev/null
+++ b/core/discovery/discovery.go
@@ -0,0 +1,119 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package discovery
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+)
+
+// Discovery is the interface that consolidates bootstrap peer membership
+// selection and validating peer selection for non-validating peers
+type Discovery interface {
+	AddNode(string) bool           // Add an address to the discovery list
+	RemoveNode(string) bool        // Remove an address from the discovery list
+	GetAllNodes() []string         // Return all addresses this peer maintains
+	GetRandomNodes(n int) []string // Return n random addresses for this peer to connect to
+	FindNode(string) bool          // Find a node in the discovery list
+}
+
+// DiscoveryImpl is an implementation of Discovery
+type DiscoveryImpl struct {
+	sync.RWMutex
+	nodes  map[string]bool // address -> active flag (false = soft-removed)
+	seq    []string        // insertion order; used by GetRandomNodes for uniform picks
+	random *rand.Rand      // dedicated PRNG, seeded at construction
+}
+
+// NewDiscoveryImpl is a constructor of a Discovery implementation.
+// The PRNG is seeded from the wall clock (non-cryptographic randomness).
+func NewDiscoveryImpl() *DiscoveryImpl {
+	di := DiscoveryImpl{}
+	di.nodes = make(map[string]bool)
+	di.random = rand.New(rand.NewSource(time.Now().Unix()))
+	return &di
+}
+
+// AddNode adds an address to the discovery list, or re-activates an address
+// previously soft-removed by RemoveNode. Returns the resulting membership
+// state of the address (always true).
+func (di *DiscoveryImpl) AddNode(address string) bool {
+	di.Lock()
+	defer di.Unlock()
+	if _, ok := di.nodes[address]; !ok {
+		// First occurrence: record order for GetRandomNodes index picks.
+		di.seq = append(di.seq, address)
+	}
+	// Always mark active so a removed node can be re-added (previously the
+	// flag was only set on first insertion, leaving re-added nodes inactive).
+	di.nodes[address] = true
+	return di.nodes[address]
+}
+
+// RemoveNode removes an address from the discovery list.
+// This is a soft delete: the entry stays in the map (and in seq) but is
+// flagged inactive, so GetAllNodes and GetRandomNodes skip it.
+// Returns false if the address was never added.
+func (di *DiscoveryImpl) RemoveNode(address string) bool {
+	di.Lock()
+	defer di.Unlock()
+	if _, ok := di.nodes[address]; ok {
+		di.nodes[address] = false
+		return true
+	}
+	return false
+}
+
+// GetAllNodes returns an array of all active addresses saved in the
+// discovery list (soft-removed entries are skipped).
+func (di *DiscoveryImpl) GetAllNodes() []string {
+	// Read-only traversal: a read lock allows concurrent readers.
+	di.RLock()
+	defer di.RUnlock()
+	var addresses []string
+	for address, valid := range di.nodes {
+		if valid {
+			addresses = append(addresses, address) // TODO Expensive, don't quite like it
+		}
+	}
+	return addresses
+}
+
+// GetRandomNodes returns n distinct random active nodes.
+// NOTE(review): if n exceeds the number of active (non-removed) nodes, the
+// inner retry loop never terminates — callers must keep n small enough;
+// confirm whether this precondition is guaranteed upstream.
+func (di *DiscoveryImpl) GetRandomNodes(n int) []string {
+	var pick string
+	randomNodes := make([]string, n)
+	di.Lock()
+	defer di.Unlock()
+	for i := 0; i < n; i++ {
+		for {
+			// len(di.nodes) == len(di.seq) here because removals are soft
+			// (entries are never deleted from either structure).
+			pick = di.seq[di.random.Intn(len(di.nodes))]
+			// Retry until the pick is active and not already selected.
+			if di.nodes[pick] && !inArray(pick, randomNodes) {
+				break
+			}
+		}
+		randomNodes[i] = pick
+	}
+	return randomNodes
+}
+
+// FindNode returns true if its address is stored in the discovery list.
+// NOTE(review): soft-removed addresses are still reported as found, because
+// RemoveNode only flags entries inactive — confirm this is intended.
+func (di *DiscoveryImpl) FindNode(address string) bool {
+	// Read-only lookup: a read lock suffices.
+	di.RLock()
+	defer di.RUnlock()
+	_, ok := di.nodes[address]
+	return ok
+}
+
+// inArray reports whether element occurs in array (linear scan).
+func inArray(element string, array []string) bool {
+	for _, val := range array {
+		if val == element {
+			return true
+		}
+	}
+	return false
+}
diff --git a/core/discovery/discovery_test.go b/core/discovery/discovery_test.go
new file mode 100644
index 00000000000..f066b8c46fb
--- /dev/null
+++ b/core/discovery/discovery_test.go
@@ -0,0 +1,112 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package discovery
+
+import "testing"
+
+// TestZeroNodes checks a fresh discovery list reports no nodes.
+func TestZeroNodes(t *testing.T) {
+	disc := NewDiscoveryImpl()
+	nodes := disc.GetAllNodes()
+	if len(nodes) != 0 {
+		t.Fatalf("Expected empty list, got a list of size %d instead", len(nodes))
+	}
+}
+
+// TestAddFindNode checks an added node is reported present by FindNode.
+func TestAddFindNode(t *testing.T) {
+	disc := NewDiscoveryImpl()
+	res := disc.AddNode("foo")
+	if !res || !disc.FindNode("foo") {
+		t.Fatal("Unable to add a node to the discovery list")
+	}
+}
+
+// TestRemoveNode checks removal of an existing node succeeds (and empties
+// GetAllNodes) while removing an unknown node fails.
+func TestRemoveNode(t *testing.T) {
+	disc := NewDiscoveryImpl()
+	_ = disc.AddNode("foo")
+	if !disc.RemoveNode("foo") || len(disc.GetAllNodes()) != 0 {
+		t.Fatalf("Unable to remove a node from the discovery list")
+	}
+	if disc.RemoveNode("bar") {
+		t.Fatalf("Remove operation should have failed, element is not present in the list")
+	}
+}
+
+// TestGetAllNodes checks GetAllNodes returns exactly the set of added nodes
+// (order is not asserted, since map iteration order is unspecified).
+func TestGetAllNodes(t *testing.T) {
+	initList := []string{"a", "b", "c", "d"}
+	disc := NewDiscoveryImpl()
+	for i := range initList {
+		_ = disc.AddNode(initList[i])
+	}
+	nodes := disc.GetAllNodes()
+
+	expected := len(initList)
+	actual := len(nodes)
+	if actual != expected {
+		t.Fatalf("Nodes list length should have been %d but is %d", expected, actual)
+		return
+	}
+
+	for _, node := range nodes {
+		if !inArray(node, initList) {
+			t.Fatalf("%s is found in the discovery list but not in the initial list %v", node, initList)
+		}
+	}
+}
+
+// TestRandomNodes checks GetRandomNodes: correct count, membership in the
+// original set, no duplicates, exclusion of removed nodes, and that repeated
+// single picks are not constant (i.e. selection is actually random).
+func TestRandomNodes(t *testing.T) {
+	initList := []string{"a", "b", "c", "d", "e"}
+	disc := NewDiscoveryImpl()
+	for i := range initList {
+		_ = disc.AddNode(initList[i])
+	}
+	expectedCount := 2
+	randomSet := disc.GetRandomNodes(expectedCount)
+	actualCount := len(randomSet)
+	if actualCount != expectedCount {
+		t.Fatalf("Expected %d random nodes, got %d instead", expectedCount, actualCount)
+	}
+	for _, node := range randomSet {
+		if !inArray(node, initList) {
+			t.Fatalf("%s was randomly picked from the discovery list but is not in the initial list %v", node, initList)
+		}
+	}
+
+	// Does the random array contain duplicate values? And does it pick nodes that were previously removed?
+	removedElement := "d"
+	_ = disc.RemoveNode(removedElement)
+	for i := 0; i < 5; i++ {
+		trackDuplicates := make(map[string]bool)
+		anotherRandomSet := disc.GetRandomNodes(expectedCount)
+		if inArray(removedElement, anotherRandomSet) {
+			t.Fatalf("Random array %v contains element %v that was removed", anotherRandomSet, removedElement)
+		}
+		for _, v := range anotherRandomSet {
+			if _, ok := trackDuplicates[v]; ok {
+				t.Fatalf("Random array contains duplicate values: %v", anotherRandomSet)
+			}
+			trackDuplicates[v] = true
+		}
+	}
+
+	// Do we get a random element? (100 tries; statistically near-certain to
+	// differ at least once if selection is uniform)
+	for i := 0; i < 100; i++ {
+		if anotherSet := disc.GetRandomNodes(1); anotherSet[0] != randomSet[0] {
+			return
+		}
+	}
+	t.Fatalf("Random returned value is always %s", randomSet[0])
+}
diff --git a/core/fsm.go b/core/fsm.go
new file mode 100644
index 00000000000..339d831250f
--- /dev/null
+++ b/core/fsm.go
@@ -0,0 +1,72 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import "github.com/looplab/fsm"
+
+// PeerConnectionFSM example FSM for demonstration purposes.
+type PeerConnectionFSM struct {
+	To  string   // address of the remote peer this connection targets
+	FSM *fsm.FSM // underlying state machine (created/established/closed)
+}
+
+// NewPeerConnectionFSM creates and returns a PeerConnectionFSM.
+// States: created -> established (HELLO) -> closed (DISCONNECT);
+// GET_PEERS/PEERS/PING self-loop in established. Callbacks log transitions.
+func NewPeerConnectionFSM(to string) *PeerConnectionFSM {
+	d := &PeerConnectionFSM{
+		To: to,
+	}
+
+	d.FSM = fsm.NewFSM(
+		"created",
+		fsm.Events{
+			{Name: "HELLO", Src: []string{"created"}, Dst: "established"},
+			{Name: "GET_PEERS", Src: []string{"established"}, Dst: "established"},
+			{Name: "PEERS", Src: []string{"established"}, Dst: "established"},
+			{Name: "PING", Src: []string{"established"}, Dst: "established"},
+			{Name: "DISCONNECT", Src: []string{"created", "established"}, Dst: "closed"},
+		},
+		fsm.Callbacks{
+			"enter_state":  func(e *fsm.Event) { d.enterState(e) },
+			"before_HELLO": func(e *fsm.Event) { d.beforeHello(e) },
+			"after_HELLO":  func(e *fsm.Event) { d.afterHello(e) },
+			"before_PING":  func(e *fsm.Event) { d.beforePing(e) },
+			"after_PING":   func(e *fsm.Event) { d.afterPing(e) },
+		},
+	)
+
+	return d
+}
+
+// enterState logs every state transition (fired on any event).
+func (d *PeerConnectionFSM) enterState(e *fsm.Event) {
+	log.Debugf("The bi-directional stream to %s is %s, from event %s\n", d.To, e.Dst, e.Event)
+}
+
+// beforeHello logs state just before the HELLO event is applied.
+func (d *PeerConnectionFSM) beforeHello(e *fsm.Event) {
+	log.Debugf("Before reception of %s, dest is %s, current is %s", e.Event, e.Dst, d.FSM.Current())
+}
+
+// afterHello logs state just after the HELLO event is applied.
+func (d *PeerConnectionFSM) afterHello(e *fsm.Event) {
+	log.Debugf("After reception of %s, dest is %s, current is %s", e.Event, e.Dst, d.FSM.Current())
+}
+
+// afterPing logs state just after the PING event is applied.
+func (d *PeerConnectionFSM) afterPing(e *fsm.Event) {
+	log.Debugf("After reception of %s, dest is %s, current is %s", e.Event, e.Dst, d.FSM.Current())
+}
+
+// beforePing logs state just before the PING event is applied.
+func (d *PeerConnectionFSM) beforePing(e *fsm.Event) {
+	log.Debugf("Before %s, dest is %s, current is %s", e.Event, e.Dst, d.FSM.Current())
+}
diff --git a/core/fsm_test.go b/core/fsm_test.go
new file mode 100644
index 00000000000..106cc2efa9c
--- /dev/null
+++ b/core/fsm_test.go
@@ -0,0 +1,131 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+ "testing"
+
+ "github.com/looplab/fsm"
+)
+
+// simulateConn drives a full HELLO/DISCONNECT cycle on a fresh FSM;
+// shared by tests and the benchmark.
+func simulateConn(tb testing.TB) {
+	peerConn := NewPeerConnectionFSM("10.10.10.10:30303")
+
+	err := peerConn.FSM.Event("HELLO")
+	if err != nil {
+		tb.Error(err)
+	}
+
+	err = peerConn.FSM.Event("DISCONNECT")
+	if err != nil {
+		tb.Error(err)
+	}
+
+}
+
+// TestFSM_PeerConnection checks the happy path: HELLO moves the FSM to
+// "established" and DISCONNECT succeeds from there.
+func TestFSM_PeerConnection(t *testing.T) {
+	peerConn := NewPeerConnectionFSM("10.10.10.10:30303")
+
+	err := peerConn.FSM.Event("HELLO")
+	if err != nil {
+		t.Error(err)
+	}
+	if peerConn.FSM.Current() != "established" {
+		// Fixed typo in failure message ("establised").
+		t.Error("Expected to be in established state")
+	}
+
+	err = peerConn.FSM.Event("DISCONNECT")
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+// TestFSM_PeerConnection2 repeats the happy-path check on a fresh FSM.
+// NOTE(review): currently identical to TestFSM_PeerConnection — confirm
+// whether a distinct scenario was intended here.
+func TestFSM_PeerConnection2(t *testing.T) {
+	peerConn := NewPeerConnectionFSM("10.10.10.10:30303")
+
+	err := peerConn.FSM.Event("HELLO")
+	if err != nil {
+		t.Error(err)
+	}
+	if peerConn.FSM.Current() != "established" {
+		// Fixed typo in failure message ("establised").
+		t.Error("Expected to be in established state")
+	}
+
+	err = peerConn.FSM.Event("DISCONNECT")
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+// TestFSM_PeerConnection_BadState_1 checks established-only events
+// (GET_PEERS, PING) fail from the "created" state, while DISCONNECT is
+// allowed from "created".
+func TestFSM_PeerConnection_BadState_1(t *testing.T) {
+	peerConn := NewPeerConnectionFSM("10.10.10.10:30303")
+
+	// Try to move from created state
+	err := peerConn.FSM.Event("GET_PEERS")
+	if err == nil {
+		t.Error("Expected bad state message")
+	}
+
+	err = peerConn.FSM.Event("PING")
+	if err == nil {
+		t.Error("Expected bad state message")
+	}
+
+	err = peerConn.FSM.Event("DISCONNECT")
+	if err != nil {
+		t.Error(err)
+	}
+
+}
+
+// TestFSM_PeerConnection_BadState_2 checks established-only events fail from
+// "created", without issuing a DISCONNECT afterwards.
+func TestFSM_PeerConnection_BadState_2(t *testing.T) {
+	peerConn := NewPeerConnectionFSM("10.10.10.10:30303")
+
+	// Try to move from created state
+	err := peerConn.FSM.Event("GET_PEERS")
+	if err == nil {
+		t.Error("Expected bad state message")
+	}
+
+	err = peerConn.FSM.Event("PING")
+	if err == nil {
+		t.Error("Expected bad state message")
+	}
+}
+
+// TestFSM_PeerConnection_BadEvent checks an undeclared event is rejected
+// with the specific fsm.UnknownEventError type.
+func TestFSM_PeerConnection_BadEvent(t *testing.T) {
+	peerConn := NewPeerConnectionFSM("10.10.10.10:30303")
+
+	// Try to move from created state
+	err := peerConn.FSM.Event("UNDEFINED_EVENT")
+	if err == nil {
+		t.Error("Expected bad event message")
+	} else {
+		// Make sure expected error type
+		if _, ok := err.(*fsm.UnknownEventError); !ok {
+			t.Error("expected only 'fsm.UnknownEventError'")
+		}
+		t.Logf("Received expected error: %s", err)
+	}
+
+}
+
+// Benchmark_FSM measures the cost of one full HELLO/DISCONNECT cycle.
+func Benchmark_FSM(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		simulateConn(b)
+	}
+}
diff --git a/core/ledger/README.md b/core/ledger/README.md
new file mode 100644
index 00000000000..24d3fe3e580
--- /dev/null
+++ b/core/ledger/README.md
@@ -0,0 +1,17 @@
+## Ledger Package
+
+This package implements the ledger, which includes the blockchain and global state.
+
+If you're looking for API to work with the blockchain or state, look in `ledger.go`. This is the file where all public functions are exposed and is extensively documented. The sections in the file are:
+
+### Transaction-batch functions
+
+These are functions that consensus should call. `BeginTxBatch` followed by `CommitTxBatch` or `RollbackTxBatch`. These functions will add a block to the blockchain with the specified transactions.
+
+### World-state functions
+
+These functions are used to modify the global state. They would generally be called by the VM based on requests from chaincode.
+
+### Blockchain functions
+
+These functions can be used to retrieve blocks/transactions from the blockchain or other information such as the blockchain size. Addition of blocks to the blockchain is done through the transaction-batch related functions.
diff --git a/core/ledger/benchmark_scripts/buckettree/buckettree.sh b/core/ledger/benchmark_scripts/buckettree/buckettree.sh
new file mode 100755
index 00000000000..c11b8ab7109
--- /dev/null
+++ b/core/ledger/benchmark_scripts/buckettree/buckettree.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+source ../common.sh
+
+PKG_PATH="github.com/hyperledger/fabric/core/ledger/statemgmt/buckettree"
+FUNCTION_NAME="BenchmarkStateHash"
+NUM_CPUS=1
+CHART_DATA_COLUMN="NUM EXISTING KEYS"
+export PEER_LEDGER_TEST_LOADYAML=false
+
+function runTest {
+ OUTPUT_DIR="$FUNCTION_NAME/${NumBuckets}_${KVSize}"
+ DB_DIR="$FUNCTION_NAME/${NumBuckets}_${KVSize}"
+ TEST_PARAMS="-NumBuckets=$NumBuckets,\
+ -MaxGroupingAtEachLevel=$MaxGroupingAtEachLevel,\
+ -ChaincodeIDPrefix=$ChaincodeIDPrefix,\
+ -NumChaincodes=$NumChaincodes,\
+ -MaxKeySuffix=$MaxKeySuffix,\
+ -NumKeysToInsert=$NumKeysToInsert,\
+ -KVSize=$KVSize"
+
+ setupAndCompileTest
+
+ for i in `seq 0 999`; do
+ EXISTING_KEYS_IN_DB=$(($i*$NumKeysToInsert))
+ echo "executing with existing keys=$EXISTING_KEYS_IN_DB"
+ CHART_COLUMN_VALUE=$EXISTING_KEYS_IN_DB
+ executeTest
+ done
+
+ ADDITIONAL_TEST_FLAGS="-test.cpuprofile=cpu.out -test.outputdir=`getOuputDir`"
+ CHART_COLUMN_VALUE=$(($(($i+1))*$NumKeysToInsert))
+ executeTest
+ constructChart
+}
+
+##### TEST PARAMS
+MaxGroupingAtEachLevel=5
+ChaincodeIDPrefix="chaincode"
+NumChaincodes=5
+MaxKeySuffix=1000000
+NumKeysToInsert=1000
+
+NumBuckets=1009;KVSize=20;runTest
+NumBuckets=10009;KVSize=20;runTest
+NumBuckets=100003;KVSize=20;runTest
+NumBuckets=1000003;KVSize=20;runTest
+
+NumBuckets=1009;KVSize=50;runTest
+NumBuckets=10009;KVSize=50;runTest
+NumBuckets=100003;KVSize=50;runTest
+NumBuckets=1000003;KVSize=50;runTest
+
+NumBuckets=1009;KVSize=100;runTest
+NumBuckets=10009;KVSize=100;runTest
+NumBuckets=100003;KVSize=100;runTest
+NumBuckets=1000003;KVSize=100;runTest
+
+NumBuckets=1009;KVSize=300;runTest
+NumBuckets=10009;KVSize=300;runTest
+NumBuckets=100003;KVSize=300;runTest
+NumBuckets=1000003;KVSize=300;runTest
+
+NumBuckets=1009;KVSize=500;runTest
+NumBuckets=10009;KVSize=500;runTest
+NumBuckets=100003;KVSize=500;runTest
+NumBuckets=1000003;KVSize=500;runTest
+
+NumBuckets=1009;KVSize=1000;runTest
+NumBuckets=10009;KVSize=1000;runTest
+NumBuckets=100003;KVSize=1000;runTest
+NumBuckets=1000003;KVSize=1000;runTest
+
+NumBuckets=1009;KVSize=2000;runTest
+NumBuckets=10009;KVSize=2000;runTest
+NumBuckets=100003;KVSize=2000;runTest
+NumBuckets=1000003;KVSize=2000;runTest
+
+NumBuckets=1009;KVSize=5000;runTest
+NumBuckets=10009;KVSize=5000;runTest
+NumBuckets=100003;KVSize=5000;runTest
+NumBuckets=1000003;KVSize=5000;runTest
diff --git a/core/ledger/benchmark_scripts/buckettree/plot.pg b/core/ledger/benchmark_scripts/buckettree/plot.pg
new file mode 100644
index 00000000000..ec014546243
--- /dev/null
+++ b/core/ledger/benchmark_scripts/buckettree/plot.pg
@@ -0,0 +1,16 @@
+#!/usr/local/bin/gnuplot
+reset
+
+# Chart specific settings
+set ylabel "milli second"
+set xlabel "Existing Data"
+set title "Buckettree performance"
+
+# General settings
+set key reverse Left outside
+set grid
+set terminal postscript dashed color
+set style data linespoints
+
+# plot command
+plot dataFile using 1:($2/1000000) title "time taken"
diff --git a/core/ledger/benchmark_scripts/buckettree/plots/all.pg b/core/ledger/benchmark_scripts/buckettree/plots/all.pg
new file mode 100644
index 00000000000..b5bdd0eef16
--- /dev/null
+++ b/core/ledger/benchmark_scripts/buckettree/plots/all.pg
@@ -0,0 +1,24 @@
+#!/usr/local/bin/gnuplot
+reset
+
+# Chart specific settings
+set ylabel "milli second"
+set xlabel "Approx number of existing keys"
+set title "Buckettree performance"
+
+# General settings
+#set key bottom center outside reverse box
+set key left top reverse box Left
+set key spacing 1 font ",9"
+set grid
+set terminal postscript enhanced color
+#set style data linespoints
+set style data lines
+
+# plot command
+plot '../output1/chart.dat' using 1:($2/1000000) title "NumBuckets=10009, MaxGroupingAtEachLevel=5, ValueSize=1000", \
+'../output2/chart.dat' using 1:($2/1000000) title "NumBuckets=10009, MaxGroupingAtEachLevel=5, ValueSize=100", \
+'../output3/chart.dat' using 1:($2/1000000) title "NumBuckets=10009, MaxGroupingAtEachLevel=5, ValueSize=10", \
+'../output5/chart.dat' using 1:($2/1000000) title "NumBuckets=100003, MaxGroupingAtEachLevel=5, ValueSize=1000", \
+'../output4/chart.dat' using 1:($2/1000000) title "NumBuckets=1000003,MaxGroupingAtEachLevel=5, ValueSize=1000", \
+'../output8/chart.dat' using 1:($2/1000000) title "NumBuckets=1000003,MaxGroupingAtEachLevel=5, ValueSize=10"
diff --git a/core/ledger/benchmark_scripts/common.sh b/core/ledger/benchmark_scripts/common.sh
new file mode 100644
index 00000000000..1f78cf35f97
--- /dev/null
+++ b/core/ledger/benchmark_scripts/common.sh
@@ -0,0 +1,128 @@
+#!/bin/bash
+
+set -e
+
+OUTPUT_DIR_ROOT=`echo ~/obc_perf/output`
+DB_DIR_ROOT=`echo ~/obc_perf/db`
+BINARY_DIR=`echo ~/obc_perf/bin`
+
+mkdir -p $OUTPUT_DIR_ROOT
+mkdir -p $DB_DIR_ROOT
+mkdir -p $BINARY_DIR
+
+BENCHMARK_OUTPUT_FILE="benchmark.out"
+CHART_DATA_FILE="chart.dat"
+CHART_FILE="chart.ps"
+
+regex="^Benchmark.*[[:blank:]]+[[:digit:]]+[[:blank:]]+([[:digit:]]+).*$"
+function writeBenchmarkOutput {
+ #echo "$@"
+ outFile=$1
+ benchmarkFile=$2
+ paramValue=$3
+ cmdOutput=$4
+ echo "outFile=$outFile, benchmarkFile=$benchmarkFile, paramValue=$paramValue"
+ echo "Test Output Start:"
+ echo "$cmdOutput"
+ echo "Test Output Finish"
+ while read -r line; do
+ echo $line >> $outFile
+ if [[ $line =~ $regex ]]; then
+ benchmarkDataLine="$paramValue ${BASH_REMATCH[1]}"
+ echo $benchmarkDataLine >> $benchmarkFile
+ fi
+ done <<< "$cmdOutput"
+}
+
+function setupAndCompileTest {
+ createOutputDir
+ configureDBPath
+ compileTest
+ writeBenchmarkHeader
+}
+
+function compileTest {
+ cmd="go test $PKG_PATH -c -o `getBinaryFileName`"
+ `eval $cmd`
+}
+
+function writeBenchmarkHeader {
+ outputDir=`getOuputDir`
+ echo "# `date`" >> $outputDir/$CHART_DATA_FILE
+ echo "# TEST_PARAMS $TEST_PARAMS" >> $outputDir/$CHART_DATA_FILE
+ echo "# $CHART_DATA_COLUMN | ns/ops" >> $outputDir/$CHART_DATA_FILE
+}
+
+## Execute test and generate data file
+function executeTest {
+ cmd="`getBinaryFileName` -testParams=\"$TEST_PARAMS\" -test.run=XXX -test.bench=$FUNCTION_NAME -test.cpu=$NUM_CPUS $ADDITIONAL_TEST_FLAGS $PKG_PATH"
+ outputDir=`getOuputDir`
+ dbDir=`getDBDir`
+ echo ""
+ echo "Executing test... [OUTPUT_DIR=$outputDir, DB_DIR=$dbDir]"
+ echo $cmd
+ cmdOutput=`eval $cmd`
+ writeBenchmarkOutput $outputDir/$BENCHMARK_OUTPUT_FILE $outputDir/$CHART_DATA_FILE $CHART_COLUMN_VALUE "$cmdOutput"
+}
+
+function getBinaryFileName {
+ pkgName=$(basename $PKG_PATH)
+ echo "$BINARY_DIR/$pkgName.test"
+}
+
+function getOuputDir {
+ pkgName=$(basename $PKG_PATH)
+ outputDir="$OUTPUT_DIR_ROOT/$pkgName/$FUNCTION_NAME"
+ if [ ! -z "$OUTPUT_DIR" ]; then
+ outputDir="$OUTPUT_DIR_ROOT/$pkgName/$OUTPUT_DIR"
+ fi
+ echo $outputDir
+}
+
+function getDBDir {
+ pkgName=$(basename $PKG_PATH)
+ dbDir="$DB_DIR_ROOT/$pkgName/$FUNCTION_NAME"
+ if [ ! -z "$DB_DIR" ]; then
+ dbDir="$DB_DIR_ROOT/$pkgName/$DB_DIR"
+ fi
+ echo $dbDir
+}
+
+function createOutputDir {
+ outputDir=`getOuputDir`
+ if [ ! -d "$outputDir" ]; then
+ mkdir -p $outputDir
+ else
+ echo "INFO: outputDIR [$outputDir] already exists. Output will be appended to existing file"
+ fi
+}
+
+function configureDBPath {
+ dbDir=`getDBDir`
+ if [ -d "$dbDir" ]; then
+ echo "INFO: dbDir [$dbDir] already exists. Data will be merged into the existing data"
+ fi
+ ulimit -n 10000
+ echo "setting ulimit=`ulimit -n`"
+ export PEER_FILESYSTEMPATH="$dbDir"
+}
+
+function constructChart {
+ outputDir=`getOuputDir`
+ gnuplot -e "dataFile='$outputDir/$CHART_DATA_FILE'" plot.pg > $outputDir/$CHART_FILE
+}
+
+function openChart {
+ outputDir=`getOuputDir`
+ open "$outputDir/$CHART_FILE"
+}
+
+function clearOSCache {
+ platform=`uname`
+ if [[ $platform == 'Darwin' ]]; then
+ echo "Clearing os cache"
+ sudo purge
+ else
+ echo "WARNING: Platform [$platform] is not supported for clearing os cache."
+ fi
+}
diff --git a/core/ledger/benchmark_scripts/ledger/db.sh b/core/ledger/benchmark_scripts/ledger/db.sh
new file mode 100755
index 00000000000..26138e2ff23
--- /dev/null
+++ b/core/ledger/benchmark_scripts/ledger/db.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+source ../common.sh
+
+PKG_PATH="github.com/hyperledger/fabric/core/ledger"
+FUNCTION_NAME="BenchmarkDB"
+NUM_CPUS=1
+CHART_DATA_COLUMN="Number of Bytes"
+
+setupAndCompileTest
+
+KVSize=1000
+MaxKeySuffix=1000000
+KeyPrefix=ChaincodeKey_
+
+CHART_COLUMN_VALUE=$KVSize
+
+## now populate the db with 'MaxKeySuffix' number of key-values
+TEST_PARAMS="-KVSize=$KVSize, -PopulateDB=true, -MaxKeySuffix=$MaxKeySuffix, -KeyPrefix=$KeyPrefix"
+executeTest
+
+# now perform the random-access read test against the populated db
+TEST_PARAMS="-KVSize=$KVSize, -PopulateDB=false, -MaxKeySuffix=$MaxKeySuffix, -KeyPrefix=$KeyPrefix"
+executeTest
+
+# now repeat the random-access read test after clearing the OS file-system cache
+clearOSCache
+executeTest
diff --git a/core/ledger/benchmark_scripts/ledger/randomTransactions.sh b/core/ledger/benchmark_scripts/ledger/randomTransactions.sh
new file mode 100755
index 00000000000..f491229cba4
--- /dev/null
+++ b/core/ledger/benchmark_scripts/ledger/randomTransactions.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+source ../common.sh
+
+
+PKG_PATH="github.com/hyperledger/fabric/core/ledger"
+NUM_CPUS=4
+CHART_DATA_COLUMN="KVSize"
+
+compileTest
+OUTPUT_DIR="BenchmarkLedgerRandomTransactions"
+createOutputDir
+CHART_DATA_COLUMN="TEST_PARAMS"
+writeBenchmarkHeader
+
+function populateDB {
+ FUNCTION_NAME="BenchmarkLedgerPopulate"
+ TEST_PARAMS="-KeyPrefix=$KeyPrefix, -KVSize=$KVSize, -BatchSize=$BatchSize, -MaxKeySuffix=$MaxKeySuffix"
+ CHART_COLUMN_VALUE="POPULATE_DB:Type=$LEDGER_STATE_DATASTRUCTURE_NAME:KeyPrefix=$KeyPrefix:KVSize=$KVSize:BatchSize=$BatchSize:MaxKeySuffix=$MaxKeySuffix:TestNumber=$TestNumber"
+ executeTest
+}
+
+function runRandomTransactions {
+ FUNCTION_NAME="BenchmarkLedgerRandomTransactions"
+ TEST_PARAMS="-KeyPrefix=$KeyPrefix, -KVSize=$KVSize, -BatchSize=$BatchSize, -MaxKeySuffix=$MaxKeySuffix, -NumBatches=$NumBatches, -NumReadsFromLedger=$NumReadsFromLedger, -NumWritesToLedger=$NumWritesToLedger"
+ CHART_COLUMN_VALUE="RANDOM_TRANSACTION_EXE:Type=$LEDGER_STATE_DATASTRUCTURE_NAME:KeyPrefix=$KeyPrefix:KVSize=$KVSize:BatchSize=$BatchSize:MaxKeySuffix=$MaxKeySuffix:NumBatches=$NumBatches:NumReadsFromLedger=$NumReadsFromLedger:NumWritesToLedger=$NumWritesToLedger:TestNumber=$TestNumber"
+ executeTest
+}
+
+function initDBPath {
+ DB_DIR="BenchmarkLedgerRandomTransactions/TestNumber=$TestNumber"
+ configureDBPath
+}
+
+function runTest {
+ initDBPath
+ populateDB
+ if [ "$CLEAR_OS_CACHE" == "true" ]; then
+ clearOSCache
+ fi
+ runRandomTransactions
+}
+
+KeyPrefix=key_
+MaxKeySuffix=1000000
+
+export LEDGER_STATE_DATASTRUCTURE_NAME="buckettree"
+
+# Before performing any of the following tests, manually delete the following folders from previous runs (if any)
+# ~/obc_perf/db (Contains the db from the test run)
+# ~/obc_perf/output/ledger (Contains the output from the test run)
+
+################## Measure the effect of bucket-cache START ############################
+# For enabling cache -
+# 1) Change the value of 'bucketCacheSize' in test.yaml from 0 to 100
+# 2) Comment the following three lines and uncomment the next three lines
+
+TestNumber=1;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=1;runTest
+TestNumber=2;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=4;runTest
+TestNumber=3;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=4;NumWritesToLedger=1;runTest
+
+#TestNumber=4;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=1;runTest
+#TestNumber=5;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=4;runTest
+#TestNumber=6;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=4;NumWritesToLedger=1;runTest
+################## Measure the effect of bucket-cache END ############################
+
+: '
+################### Compare with raw state implementation START ############################
+CLEAR_OS_CACHE=false
+export LEDGER_STATE_DATASTRUCTURE_NAME="raw"
+TestNumber=1;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=1;runTest
+TestNumber=2;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=4;runTest
+TestNumber=3;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=4;NumWritesToLedger=1;runTest
+
+export LEDGER_STATE_DATASTRUCTURE_NAME="buckettree"
+TestNumber=4;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=1;runTest
+TestNumber=5;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=4;runTest
+TestNumber=6;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=4;NumWritesToLedger=1;runTest
+
+CLEAR_OS_CACHE=true
+export LEDGER_STATE_DATASTRUCTURE_NAME="raw"
+TestNumber=7;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=1;runTest
+TestNumber=8;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=4;runTest
+TestNumber=9;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=4;NumWritesToLedger=1;runTest
+
+export LEDGER_STATE_DATASTRUCTURE_NAME="buckettree"
+TestNumber=10;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=1;runTest
+TestNumber=11;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=1;NumWritesToLedger=4;runTest
+TestNumber=12;KVSize=100;BatchSize=100;NumBatches=1000;NumReadsFromLedger=4;NumWritesToLedger=1;runTest
+################### Compare with raw state implementation END ############################
+'
diff --git a/core/ledger/benchmark_scripts/ledger/singleKeyTransaction.sh b/core/ledger/benchmark_scripts/ledger/singleKeyTransaction.sh
new file mode 100755
index 00000000000..b5eef2b6e1d
--- /dev/null
+++ b/core/ledger/benchmark_scripts/ledger/singleKeyTransaction.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+source ../common.sh
+
+PKG_PATH="github.com/hyperledger/fabric/core/ledger"
+FUNCTION_NAME="BenchmarkLedgerSingleKeyTransaction"
+NUM_CPUS=4
+CHART_DATA_COLUMN="Number of Bytes"
+
+setupAndCompileTest
+
+Key=key
+KVSize=100
+BatchSize=100
+NumBatches=10000
+NumWritesToLedger=2
+
+CHART_COLUMN_VALUE=$KVSize
+
+TEST_PARAMS="-Key=$Key, -KVSize=$KVSize, -BatchSize=$BatchSize, -NumBatches=$NumBatches, -NumWritesToLedger=$NumWritesToLedger"
+executeTest
diff --git a/core/ledger/benchmark_scripts/ledger/test.yaml b/core/ledger/benchmark_scripts/ledger/test.yaml
new file mode 100644
index 00000000000..37a75609914
--- /dev/null
+++ b/core/ledger/benchmark_scripts/ledger/test.yaml
@@ -0,0 +1,14 @@
+###############################################################################
+#
+# Peer section
+#
+###############################################################################
+peer:
+
+ledger:
+ state:
+ dataStructure:
+ configs:
+ numBuckets: 1000003
+ maxGroupingAtEachLevel: 5
+ bucketCacheSize: 100
diff --git a/core/ledger/benchmark_scripts/statemgmt/cryptoHash.sh b/core/ledger/benchmark_scripts/statemgmt/cryptoHash.sh
new file mode 100755
index 00000000000..4c467f6de00
--- /dev/null
+++ b/core/ledger/benchmark_scripts/statemgmt/cryptoHash.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+source ../common.sh
+
+PKG_PATH="github.com/hyperledger/fabric/core/ledger/statemgmt"
+FUNCTION_NAME="BenchmarkCryptoHash"
+NUM_CPUS=1
+CHART_DATA_COLUMN="Number of Bytes"
+
+setupAndCompileTest
+
+for i in 1 5 10 20 50 100 200 400 600 800 1000 2000 5000 10000 20000 50000 100000; do
+ TEST_PARAMS="-NumBytes=$i"
+ CHART_COLUMN_VALUE=$i
+ executeTest
+done
+
+constructChart
diff --git a/core/ledger/benchmark_scripts/statemgmt/plot.pg b/core/ledger/benchmark_scripts/statemgmt/plot.pg
new file mode 100644
index 00000000000..d8740857280
--- /dev/null
+++ b/core/ledger/benchmark_scripts/statemgmt/plot.pg
@@ -0,0 +1,18 @@
+#!/usr/local/bin/gnuplot
+reset
+
+# Chart specific settings
+set ylabel "nano second"
+set xlabel "Number of bytes"
+set title "CryptoHash performance"
+set logscale xy 10
+
+# General settings
+set key reverse Left outside
+set grid
+set terminal postscript dashed color
+set style data linespoints
+
+# plot command
+plot dataFile using 1:2 title "time taken", \
+"" using 1:($2/$1) title "time taken per byte"
diff --git a/core/ledger/blockchain.go b/core/ledger/blockchain.go
new file mode 100644
index 00000000000..4732439078e
--- /dev/null
+++ b/core/ledger/blockchain.go
@@ -0,0 +1,331 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ledger
+
+import (
+ "bytes"
+ "encoding/binary"
+ "strconv"
+
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/util"
+ "github.com/hyperledger/fabric/protos"
+ "github.com/tecbot/gorocksdb"
+ "golang.org/x/net/context"
+)
+
+// blockchain holds basic information in memory. Operations on blockchain are not thread-safe
+// TODO synchronize access to in-memory variables
+type blockchain struct {
+ size uint64
+ previousBlockHash []byte
+ indexer blockchainIndexer
+ lastProcessedBlock *lastProcessedBlock
+}
+
+type lastProcessedBlock struct {
+ block *protos.Block
+ blockNumber uint64
+ blockHash []byte
+}
+
+var indexBlockDataSynchronously = true
+
+func newBlockchain() (*blockchain, error) {
+ size, err := fetchBlockchainSizeFromDB()
+ if err != nil {
+ return nil, err
+ }
+ blockchain := &blockchain{0, nil, nil, nil}
+ blockchain.size = size
+ if size > 0 {
+ previousBlock, err := fetchBlockFromDB(size - 1)
+ if err != nil {
+ return nil, err
+ }
+ previousBlockHash, err := previousBlock.GetHash()
+ if err != nil {
+ return nil, err
+ }
+ blockchain.previousBlockHash = previousBlockHash
+ }
+
+ err = blockchain.startIndexer()
+ if err != nil {
+ return nil, err
+ }
+ return blockchain, nil
+}
+
+func (blockchain *blockchain) startIndexer() (err error) {
+ if indexBlockDataSynchronously {
+ blockchain.indexer = newBlockchainIndexerSync()
+ } else {
+ blockchain.indexer = newBlockchainIndexerAsync()
+ }
+ err = blockchain.indexer.start(blockchain)
+ return
+}
+
+// getLastBlock get last block in blockchain
+func (blockchain *blockchain) getLastBlock() (*protos.Block, error) {
+ if blockchain.size == 0 {
+ return nil, nil
+ }
+ return blockchain.getBlock(blockchain.size - 1)
+}
+
+// getSize number of blocks in blockchain
+func (blockchain *blockchain) getSize() uint64 {
+ return blockchain.size
+}
+
+// getBlock get block at arbitrary height in block chain
+func (blockchain *blockchain) getBlock(blockNumber uint64) (*protos.Block, error) {
+ return fetchBlockFromDB(blockNumber)
+}
+
+// getBlockByHash get block by block hash
+func (blockchain *blockchain) getBlockByHash(blockHash []byte) (*protos.Block, error) {
+ blockNumber, err := blockchain.indexer.fetchBlockNumberByBlockHash(blockHash)
+ if err != nil {
+ return nil, err
+ }
+ return blockchain.getBlock(blockNumber)
+}
+
+func (blockchain *blockchain) getTransactionByUUID(txUUID string) (*protos.Transaction, error) {
+ blockNumber, txIndex, err := blockchain.indexer.fetchTransactionIndexByUUID(txUUID)
+ if err != nil {
+ return nil, err
+ }
+ block, err := blockchain.getBlock(blockNumber)
+ if err != nil {
+ return nil, err
+ }
+ transaction := block.GetTransactions()[txIndex]
+ return transaction, nil
+}
+
+// getTransactions get all transactions in a block identified by block number
+func (blockchain *blockchain) getTransactions(blockNumber uint64) ([]*protos.Transaction, error) {
+ block, err := blockchain.getBlock(blockNumber)
+ if err != nil {
+ return nil, err
+ }
+ return block.GetTransactions(), nil
+}
+
+// getTransactionsByBlockHash get all transactions in a block identified by block hash
+func (blockchain *blockchain) getTransactionsByBlockHash(blockHash []byte) ([]*protos.Transaction, error) {
+ block, err := blockchain.getBlockByHash(blockHash)
+ if err != nil {
+ return nil, err
+ }
+ return block.GetTransactions(), nil
+}
+
+// getTransaction get a transaction identified by block number and index within the block
+func (blockchain *blockchain) getTransaction(blockNumber uint64, txIndex uint64) (*protos.Transaction, error) {
+ block, err := blockchain.getBlock(blockNumber)
+ if err != nil {
+ return nil, err
+ }
+ return block.GetTransactions()[txIndex], nil
+}
+
+// getTransactionByBlockHash get a transaction identified by block hash and index within the block
+func (blockchain *blockchain) getTransactionByBlockHash(blockHash []byte, txIndex uint64) (*protos.Transaction, error) {
+ block, err := blockchain.getBlockByHash(blockHash)
+ if err != nil {
+ return nil, err
+ }
+ return block.GetTransactions()[txIndex], nil
+}
+
+func (blockchain *blockchain) getBlockchainInfo() (*protos.BlockchainInfo, error) {
+ if blockchain.getSize() == 0 {
+ return &protos.BlockchainInfo{Height: 0}, nil
+ }
+
+ lastBlock, err := blockchain.getLastBlock()
+ if err != nil {
+ return nil, err
+ }
+
+ info := blockchain.getBlockchainInfoForBlock(blockchain.getSize(), lastBlock)
+ return info, nil
+}
+
+func (blockchain *blockchain) getBlockchainInfoForBlock(height uint64, block *protos.Block) *protos.BlockchainInfo {
+ hash, _ := block.GetHash()
+ info := &protos.BlockchainInfo{
+ Height: height,
+ CurrentBlockHash: hash,
+ PreviousBlockHash: block.PreviousBlockHash}
+
+ return info
+}
+
+func (blockchain *blockchain) buildBlock(block *protos.Block, stateHash []byte) *protos.Block {
+ block.SetPreviousBlockHash(blockchain.previousBlockHash)
+ block.StateHash = stateHash
+ return block
+}
+
+func (blockchain *blockchain) addPersistenceChangesForNewBlock(ctx context.Context,
+ block *protos.Block, stateHash []byte, writeBatch *gorocksdb.WriteBatch) (uint64, error) {
+ block = blockchain.buildBlock(block, stateHash)
+ if block.NonHashData == nil {
+ block.NonHashData = &protos.NonHashData{LocalLedgerCommitTimestamp: util.CreateUtcTimestamp()}
+ } else {
+ block.NonHashData.LocalLedgerCommitTimestamp = util.CreateUtcTimestamp()
+ }
+ blockNumber := blockchain.size
+ blockHash, err := block.GetHash()
+ if err != nil {
+ return 0, err
+ }
+ blockBytes, blockBytesErr := block.Bytes()
+ if blockBytesErr != nil {
+ return 0, blockBytesErr
+ }
+ writeBatch.PutCF(db.GetDBHandle().BlockchainCF, encodeBlockNumberDBKey(blockNumber), blockBytes)
+ writeBatch.PutCF(db.GetDBHandle().BlockchainCF, blockCountKey, encodeUint64(blockNumber+1))
+ if blockchain.indexer.isSynchronous() {
+ blockchain.indexer.createIndexesSync(block, blockNumber, blockHash, writeBatch)
+ }
+ blockchain.lastProcessedBlock = &lastProcessedBlock{block, blockNumber, blockHash}
+ return blockNumber, nil
+}
+
+func (blockchain *blockchain) blockPersistenceStatus(success bool) {
+ if success {
+ blockchain.size++
+ blockchain.previousBlockHash = blockchain.lastProcessedBlock.blockHash
+ if !blockchain.indexer.isSynchronous() {
+ blockchain.indexer.createIndexesAsync(blockchain.lastProcessedBlock.block,
+ blockchain.lastProcessedBlock.blockNumber, blockchain.lastProcessedBlock.blockHash)
+ }
+ }
+ blockchain.lastProcessedBlock = nil
+}
+
+func (blockchain *blockchain) persistRawBlock(block *protos.Block, blockNumber uint64) error {
+ blockBytes, blockBytesErr := block.Bytes()
+ if blockBytesErr != nil {
+ return blockBytesErr
+ }
+ writeBatch := gorocksdb.NewWriteBatch()
+ defer writeBatch.Destroy()
+ writeBatch.PutCF(db.GetDBHandle().BlockchainCF, encodeBlockNumberDBKey(blockNumber), blockBytes)
+
+ blockHash, err := block.GetHash()
+ if err != nil {
+ return err
+ }
+
+ // Need to check as we support out of order blocks in cases such as block/state synchronization. This is
+ // real blockchain height, not size.
+ if blockchain.getSize() < blockNumber+1 {
+ sizeBytes := encodeUint64(blockNumber + 1)
+ writeBatch.PutCF(db.GetDBHandle().BlockchainCF, blockCountKey, sizeBytes)
+ blockchain.size = blockNumber + 1
+ blockchain.previousBlockHash = blockHash
+ }
+
+ if blockchain.indexer.isSynchronous() {
+ blockchain.indexer.createIndexesSync(block, blockNumber, blockHash, writeBatch)
+ }
+
+ opt := gorocksdb.NewDefaultWriteOptions()
+ defer opt.Destroy()
+ err = db.GetDBHandle().DB.Write(opt, writeBatch)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func fetchBlockFromDB(blockNumber uint64) (*protos.Block, error) {
+ blockBytes, err := db.GetDBHandle().GetFromBlockchainCF(encodeBlockNumberDBKey(blockNumber))
+ if err != nil {
+ return nil, err
+ }
+ if blockBytes == nil {
+ return nil, nil
+ }
+ return protos.UnmarshallBlock(blockBytes)
+}
+
+func fetchBlockchainSizeFromDB() (uint64, error) {
+ bytes, err := db.GetDBHandle().GetFromBlockchainCF(blockCountKey)
+ if err != nil {
+ return 0, err
+ }
+ if bytes == nil {
+ return 0, nil
+ }
+ return decodeToUint64(bytes), nil
+}
+
+func fetchBlockchainSizeFromSnapshot(snapshot *gorocksdb.Snapshot) (uint64, error) {
+ blockNumberBytes, err := db.GetDBHandle().GetFromBlockchainCFSnapshot(snapshot, blockCountKey)
+ if err != nil {
+ return 0, err
+ }
+ var blockNumber uint64
+ if blockNumberBytes != nil {
+ blockNumber = decodeToUint64(blockNumberBytes)
+ }
+ return blockNumber, nil
+}
+
+var blockCountKey = []byte("blockCount")
+
+func encodeBlockNumberDBKey(blockNumber uint64) []byte {
+ return encodeUint64(blockNumber)
+}
+
+func encodeUint64(number uint64) []byte {
+ bytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(bytes, number)
+ return bytes
+}
+
+func decodeToUint64(bytes []byte) uint64 {
+ return binary.BigEndian.Uint64(bytes)
+}
+
+func (blockchain *blockchain) String() string {
+ var buffer bytes.Buffer
+ size := blockchain.getSize()
+ for i := uint64(0); i < size; i++ {
+ block, blockErr := blockchain.getBlock(i)
+ if blockErr != nil {
+ return ""
+ }
+ buffer.WriteString("\n--------------------\n")
+ buffer.WriteString(block.String())
+ buffer.WriteString("\n----------<\\block #")
+ buffer.WriteString(strconv.FormatUint(i, 10))
+ buffer.WriteString(">----------\n")
+ }
+ return buffer.String()
+}
diff --git a/core/ledger/blockchain_indexes.go b/core/ledger/blockchain_indexes.go
new file mode 100644
index 00000000000..5252f513891
--- /dev/null
+++ b/core/ledger/blockchain_indexes.go
@@ -0,0 +1,219 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ledger
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/protos"
+ "github.com/op/go-logging"
+ "github.com/tecbot/gorocksdb"
+)
+
+var indexLogger = logging.MustGetLogger("indexes")
+
+// Single-byte prefixes that namespace the keys within the indexes column family.
+var prefixBlockHashKey = byte(1)
+var prefixTxUUIDKey = byte(2)
+var prefixAddressBlockNumCompositeKey = byte(3)
+
+// blockchainIndexer abstracts how block index entries are created and queried.
+// Two implementations exist: synchronous (entries staged into the block-commit write
+// batch) and asynchronous (entries created by a background goroutine).
+type blockchainIndexer interface {
+	isSynchronous() bool
+	start(blockchain *blockchain) error
+	// createIndexesSync stages index entries into the caller's write batch (sync impl only).
+	createIndexesSync(block *protos.Block, blockNumber uint64, blockHash []byte, writeBatch *gorocksdb.WriteBatch) error
+	// createIndexesAsync hands the block off to a background indexer (async impl only).
+	createIndexesAsync(block *protos.Block, blockNumber uint64, blockHash []byte) error
+	fetchBlockNumberByBlockHash(blockHash []byte) (uint64, error)
+	fetchTransactionIndexByUUID(txUUID string) (uint64, uint64, error)
+	stop()
+}
+
+// Implementation for sync indexer
+type blockchainIndexerSync struct {
+}
+
+// newBlockchainIndexerSync returns a stateless synchronous indexer.
+func newBlockchainIndexerSync() *blockchainIndexerSync {
+	return &blockchainIndexerSync{}
+}
+
+func (indexer *blockchainIndexerSync) isSynchronous() bool {
+	return true
+}
+
+// start is a no-op: the sync indexer keeps no state and runs no goroutine.
+func (indexer *blockchainIndexerSync) start(blockchain *blockchain) error {
+	return nil
+}
+
+// createIndexesSync stages the index entries into the caller's write batch so they
+// commit atomically together with the block itself.
+func (indexer *blockchainIndexerSync) createIndexesSync(
+	block *protos.Block, blockNumber uint64, blockHash []byte, writeBatch *gorocksdb.WriteBatch) error {
+	return addIndexDataForPersistence(block, blockNumber, blockHash, writeBatch)
+}
+
+// createIndexesAsync is not supported by the synchronous implementation.
+func (indexer *blockchainIndexerSync) createIndexesAsync(block *protos.Block, blockNumber uint64, blockHash []byte) error {
+	return fmt.Errorf("Method not applicable")
+}
+
+func (indexer *blockchainIndexerSync) fetchBlockNumberByBlockHash(blockHash []byte) (uint64, error) {
+	return fetchBlockNumberByBlockHashFromDB(blockHash)
+}
+
+func (indexer *blockchainIndexerSync) fetchTransactionIndexByUUID(txUUID string) (uint64, uint64, error) {
+	return fetchTransactionIndexByUUIDFromDB(txUUID)
+}
+
+func (indexer *blockchainIndexerSync) stop() {
+	return
+}
+
+// Functions for persisting and retrieving index data
+
+// addIndexDataForPersistence stages the index entries for one block into writeBatch:
+//   - blockHash -> blockNumber
+//   - txUUID -> (blockNumber, txIndexInBlock)
+//   - (address, blockNumber) -> list of tx indexes executed by that address
+// The batch is committed by the caller; this function only stages writes and currently
+// always returns nil.
+func addIndexDataForPersistence(block *protos.Block, blockNumber uint64, blockHash []byte, writeBatch *gorocksdb.WriteBatch) error {
+	openchainDB := db.GetDBHandle()
+	cf := openchainDB.IndexesCF
+
+	// add blockhash -> blockNumber
+	indexLogger.Debugf("Indexing block number [%d] by hash = [%x]", blockNumber, blockHash)
+	writeBatch.PutCF(cf, encodeBlockHashKey(blockHash), encodeBlockNumber(blockNumber))
+
+	addressToTxIndexesMap := make(map[string][]uint64)
+	addressToChaincodeIDsMap := make(map[string][]*protos.ChaincodeID)
+
+	transactions := block.GetTransactions()
+	for txIndex, tx := range transactions {
+		// add TxUUID -> (blockNumber,indexWithinBlock)
+		writeBatch.PutCF(cf, encodeTxUUIDKey(tx.Uuid), encodeBlockNumTxIndex(blockNumber, uint64(txIndex)))
+
+		txExecutingAddress := getTxExecutingAddress(tx)
+		addressToTxIndexesMap[txExecutingAddress] = append(addressToTxIndexesMap[txExecutingAddress], uint64(txIndex))
+
+		switch tx.Type {
+		case protos.Transaction_CHAINCODE_DEPLOY, protos.Transaction_CHAINCODE_INVOKE:
+			authorizedAddresses, chaincodeID := getAuthorisedAddresses(tx)
+			for _, authorizedAddress := range authorizedAddresses {
+				addressToChaincodeIDsMap[authorizedAddress] = append(addressToChaincodeIDsMap[authorizedAddress], chaincodeID)
+			}
+		}
+	}
+	for address, txsIndexes := range addressToTxIndexesMap {
+		writeBatch.PutCF(cf, encodeAddressBlockNumCompositeKey(address, blockNumber), encodeListTxIndexes(txsIndexes))
+	}
+	// NOTE(review): addressToChaincodeIDsMap is populated but never written to the batch —
+	// confirm whether an address->chaincodeIDs index was intended here.
+	return nil
+}
+
+// fetchBlockNumberByBlockHashFromDB looks up the block number indexed under blockHash.
+// A missing index entry surfaces as a ledger error of type ErrorTypeBlockNotFound.
+func fetchBlockNumberByBlockHashFromDB(blockHash []byte) (uint64, error) {
+	indexLogger.Debugf("fetchBlockNumberByBlockHashFromDB() for blockhash [%x]", blockHash)
+	numberBytes, err := db.GetDBHandle().GetFromIndexesCF(encodeBlockHashKey(blockHash))
+	if err != nil {
+		return 0, err
+	}
+	indexLogger.Debugf("blockNumberBytes for blockhash [%x] is [%x]", blockHash, numberBytes)
+	if len(numberBytes) == 0 {
+		return 0, newLedgerError(ErrorTypeBlockNotFound, fmt.Sprintf("No block indexed with block hash [%x]", blockHash))
+	}
+	return decodeBlockNumber(numberBytes), nil
+}
+
+// fetchTransactionIndexByUUIDFromDB resolves a transaction UUID to its location as a
+// (blockNumber, txIndex) pair; ErrResourceNotFound is returned for an unindexed UUID.
+func fetchTransactionIndexByUUIDFromDB(txUUID string) (uint64, uint64, error) {
+	locationBytes, err := db.GetDBHandle().GetFromIndexesCF(encodeTxUUIDKey(txUUID))
+	if err != nil {
+		return 0, 0, err
+	}
+	if locationBytes == nil {
+		return 0, 0, ErrResourceNotFound
+	}
+	return decodeBlockNumTxIndex(locationBytes)
+}
+
+// getTxExecutingAddress returns the address that executed the transaction.
+// NOTE(review): currently a hard-coded placeholder (see TODO) — every tx is indexed under "address1".
+func getTxExecutingAddress(tx *protos.Transaction) string {
+	// TODO Fetch address form tx
+	return "address1"
+}
+
+// getAuthorisedAddresses returns the addresses authorized for the chaincode referenced
+// by tx, along with the decoded ChaincodeID. The address list is a hard-coded
+// placeholder (see TODOs).
+// NOTE(review): an Unmarshal failure is silently swallowed by returning (nil, nil).
+func getAuthorisedAddresses(tx *protos.Transaction) ([]string, *protos.ChaincodeID) {
+	// TODO fetch address from chaincode deployment tx
+	// TODO this method should also return error
+	data := tx.ChaincodeID
+	cID := &protos.ChaincodeID{}
+	err := proto.Unmarshal(data, cID)
+	if err != nil {
+		return nil, nil
+	}
+	return []string{"address1", "address2"}, cID
+}
+
+// functions for encoding/decoding db keys/values for index data
+// encode / decode BlockNumber
+
+// encodeBlockNumber serializes a block number as a varint (index-CF value format).
+func encodeBlockNumber(blockNumber uint64) []byte {
+	return proto.EncodeVarint(blockNumber)
+}
+
+// decodeBlockNumber is the inverse of encodeBlockNumber.
+// NOTE(review): the bytes-consumed result of DecodeVarint is discarded, so malformed
+// input is not detectable here — confirm callers only pass values written by encodeBlockNumber.
+func decodeBlockNumber(blockNumberBytes []byte) (blockNumber uint64) {
+	blockNumber, _ = proto.DecodeVarint(blockNumberBytes)
+	return
+}
+
+// encode / decode BlockNumTxIndex
+
+// encodeBlockNumTxIndex packs a transaction location as two consecutive varints:
+// the block number followed by the tx index within the block.
+func encodeBlockNumTxIndex(blockNumber uint64, txIndexInBlock uint64) []byte {
+	buf := proto.NewBuffer([]byte{})
+	buf.EncodeVarint(blockNumber)
+	buf.EncodeVarint(txIndexInBlock)
+	return buf.Bytes()
+}
+
+// decodeBlockNumTxIndex is the inverse of encodeBlockNumTxIndex; it returns an error
+// if either varint is malformed or missing.
+func decodeBlockNumTxIndex(raw []byte) (blockNum uint64, txIndex uint64, err error) {
+	buf := proto.NewBuffer(raw)
+	if blockNum, err = buf.DecodeVarint(); err != nil {
+		return
+	}
+	txIndex, err = buf.DecodeVarint()
+	return
+}
+
+// encode BlockHashKey
+
+// encodeBlockHashKey builds the index key for a block hash: prefix(1) | hash bytes.
+func encodeBlockHashKey(blockHash []byte) []byte {
+	return prependKeyPrefix(prefixBlockHashKey, blockHash)
+}
+
+// encode TxUUIDKey
+
+// encodeTxUUIDKey builds the index key for a transaction UUID: prefix(2) | uuid bytes.
+func encodeTxUUIDKey(txUUID string) []byte {
+	return prependKeyPrefix(prefixTxUUIDKey, []byte(txUUID))
+}
+
+// encodeAddressBlockNumCompositeKey builds the composite index key
+// prefix(3) | length-prefixed address | varint blockNumber.
+func encodeAddressBlockNumCompositeKey(address string, blockNumber uint64) []byte {
+	b := proto.NewBuffer([]byte{prefixAddressBlockNumCompositeKey})
+	b.EncodeRawBytes([]byte(address))
+	b.EncodeVarint(blockNumber)
+	return b.Bytes()
+}
+
+// encodeListTxIndexes serializes a list of tx indexes as consecutive varints.
+func encodeListTxIndexes(listTx []uint64) []byte {
+	buf := proto.NewBuffer([]byte{})
+	for _, txIndex := range listTx {
+		buf.EncodeVarint(txIndex)
+	}
+	return buf.Bytes()
+}
+
+// prependKeyPrefix returns a freshly allocated key of the form prefix | key,
+// never aliasing the caller's slice.
+func prependKeyPrefix(prefix byte, key []byte) []byte {
+	return append([]byte{prefix}, key...)
+}
diff --git a/core/ledger/blockchain_indexes_async.go b/core/ledger/blockchain_indexes_async.go
new file mode 100644
index 00000000000..11b6484507e
--- /dev/null
+++ b/core/ledger/blockchain_indexes_async.go
@@ -0,0 +1,329 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ledger
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/protos"
+ "github.com/tecbot/gorocksdb"
+)
+
+// lastIndexedBlockKey is the index-CF key tracking the highest block number indexed so far.
+var lastIndexedBlockKey = []byte{byte(0)}
+
+// blockWrapper is the unit of work sent over blockChan; stopNow marks the shutdown sentinel.
+type blockWrapper struct {
+	block       *protos.Block
+	blockNumber uint64
+	blockHash   []byte
+	stopNow     bool
+}
+
+// blockchainIndexerAsync indexes committed blocks from a background goroutine fed via blockChan.
+type blockchainIndexerAsync struct {
+	blockchain *blockchain
+	// Channel for transferring block from block chain for indexing
+	blockChan    chan blockWrapper
+	indexerState *blockchainIndexerState
+}
+
+// newBlockchainIndexerAsync returns an uninitialized async indexer; start() wires it up.
+func newBlockchainIndexerAsync() *blockchainIndexerAsync {
+	return new(blockchainIndexerAsync)
+}
+
+func (indexer *blockchainIndexerAsync) isSynchronous() bool {
+	return false
+}
+
+// start initializes the async indexer: it restores indexing progress from the DB,
+// synchronously indexes any blocks committed but not yet indexed, and then launches the
+// goroutine that consumes blocks from blockChan until the stop sentinel is received.
+// (Also fixes log-message typos: "staring" -> "starting", "occured" -> "occurred".)
+func (indexer *blockchainIndexerAsync) start(blockchain *blockchain) error {
+	indexer.blockchain = blockchain
+	indexerState, err := newBlockchainIndexerState(indexer)
+	if err != nil {
+		return err
+	}
+	indexer.indexerState = indexerState
+	indexLogger.Debugf("starting indexer, lastIndexedBlockNum = [%d]",
+		indexer.indexerState.getLastIndexedBlockNumber())
+
+	// Catch up on blocks committed while the indexer was down.
+	err = indexer.indexPendingBlocks()
+	if err != nil {
+		return err
+	}
+	indexLogger.Debugf("starting indexer, lastIndexedBlockNum = [%d] after processing pending blocks",
+		indexer.indexerState.getLastIndexedBlockNumber())
+	indexer.blockChan = make(chan blockWrapper)
+	go func() {
+		for {
+			indexLogger.Debug("Going to wait on channel for next block to index")
+			blockWrapper := <-indexer.blockChan
+
+			indexLogger.Debugf("Blockwrapper received on channel: block number = [%d]", blockWrapper.blockNumber)
+
+			if blockWrapper.stopNow {
+				indexLogger.Debug("stop command received on channel")
+				// Echo the sentinel back so stop() knows the goroutine has drained.
+				indexer.blockChan <- blockWrapper
+				return
+			}
+			// Once an indexing error is recorded, skip all further blocks: the index is out of sync.
+			if indexer.indexerState.hasError() {
+				indexLogger.Debugf("Not indexing block number [%d]. Because of previous error: %s.",
+					blockWrapper.blockNumber, indexer.indexerState.getError())
+				continue
+			}
+
+			err := indexer.createIndexesInternal(blockWrapper.block, blockWrapper.blockNumber, blockWrapper.blockHash)
+			if err != nil {
+				indexer.indexerState.setError(err)
+				indexLogger.Debugf(
+					"Error occurred while indexing block number [%d]. Error: %s. Further blocks will not be indexed",
+					blockWrapper.blockNumber, err)
+
+			} else {
+				indexLogger.Debugf("Finished indexing block number [%d]", blockWrapper.blockNumber)
+			}
+		}
+	}()
+	return nil
+}
+
+// createIndexesSync is not supported by the asynchronous implementation.
+func (indexer *blockchainIndexerAsync) createIndexesSync(
+	block *protos.Block, blockNumber uint64, blockHash []byte, writeBatch *gorocksdb.WriteBatch) error {
+	return fmt.Errorf("Method not applicable")
+}
+
+// createIndexesAsync enqueues the block for the background goroutine. blockChan is
+// unbuffered, so this blocks until the goroutine receives the wrapper.
+func (indexer *blockchainIndexerAsync) createIndexesAsync(block *protos.Block, blockNumber uint64, blockHash []byte) error {
+	indexer.blockChan <- blockWrapper{block, blockNumber, blockHash, false}
+	return nil
+}
+
+// createIndexesInternal stages the index entries for a block plus the lastIndexedBlockKey
+// progress marker, commits them atomically in a single write batch, and then advances the
+// in-memory indexer state (waking any readers blocked in waitForLastCommittedBlock).
+func (indexer *blockchainIndexerAsync) createIndexesInternal(block *protos.Block, blockNumber uint64, blockHash []byte) error {
+	openchainDB := db.GetDBHandle()
+	writeBatch := gorocksdb.NewWriteBatch()
+	defer writeBatch.Destroy()
+	// Bug fix: the error returned by addIndexDataForPersistence was previously discarded.
+	if err := addIndexDataForPersistence(block, blockNumber, blockHash, writeBatch); err != nil {
+		return err
+	}
+	writeBatch.PutCF(openchainDB.IndexesCF, lastIndexedBlockKey, encodeBlockNumber(blockNumber))
+	opt := gorocksdb.NewDefaultWriteOptions()
+	defer opt.Destroy()
+	if err := openchainDB.DB.Write(opt, writeBatch); err != nil {
+		return err
+	}
+	indexer.indexerState.blockIndexed(blockNumber)
+	return nil
+}
+
+// fetchBlockNumberByBlockHash blocks until the index has caught up with the committed
+// chain, then resolves blockHash to a block number. A previously recorded indexing
+// error fails the query immediately.
+func (indexer *blockchainIndexerAsync) fetchBlockNumberByBlockHash(blockHash []byte) (uint64, error) {
+	err := indexer.indexerState.checkError()
+	if err != nil {
+		indexLogger.Debug("Async indexer has a previous error. Returning the error")
+		return 0, err
+	}
+	// Bug fix: an error surfaced while waiting (e.g. indexing failed mid-wait) was previously ignored.
+	if err := indexer.indexerState.waitForLastCommittedBlock(); err != nil {
+		return 0, err
+	}
+	return fetchBlockNumberByBlockHashFromDB(blockHash)
+}
+
+// fetchTransactionIndexByUUID blocks until the index has caught up with the committed
+// chain, then resolves txUUID to its (blockNumber, txIndex) location.
+func (indexer *blockchainIndexerAsync) fetchTransactionIndexByUUID(txUUID string) (uint64, uint64, error) {
+	err := indexer.indexerState.checkError()
+	if err != nil {
+		return 0, 0, err
+	}
+	// Bug fix: an error surfaced while waiting was previously ignored.
+	if err := indexer.indexerState.waitForLastCommittedBlock(); err != nil {
+		return 0, 0, err
+	}
+	return fetchTransactionIndexByUUIDFromDB(txUUID)
+}
+
+// indexPendingBlocks synchronously indexes every committed block that had not yet been
+// indexed when the indexer started, including the specially-tracked zeroth block.
+func (indexer *blockchainIndexerAsync) indexPendingBlocks() error {
+	blockchain := indexer.blockchain
+	if blockchain.getSize() == 0 {
+		// chain is empty as yet
+		return nil
+	}
+
+	lastCommittedBlockNum := blockchain.getSize() - 1
+	lastIndexedBlockNum := indexer.indexerState.getLastIndexedBlockNumber()
+	zerothBlockIndexed := indexer.indexerState.isZerothBlockIndexed()
+
+	indexLogger.Debugf("lastCommittedBlockNum=[%d], lastIndexedBlockNum=[%d], zerothBlockIndexed=[%t]",
+		lastCommittedBlockNum, lastIndexedBlockNum, zerothBlockIndexed)
+
+	// block numbers use uint64 - so, 'lastIndexedBlockNum = 0' is ambiguous.
+	// So, explicitly checking whether zero-th block has been indexed
+	if !zerothBlockIndexed {
+		err := indexer.fetchBlockFromDBAndCreateIndexes(0)
+		if err != nil {
+			return err
+		}
+	}
+
+	if lastCommittedBlockNum == lastIndexedBlockNum {
+		// all committed blocks are indexed
+		return nil
+	}
+
+	// Index blocks in (lastIndexedBlockNum, lastCommittedBlockNum], one at a time, in order.
+	for ; lastIndexedBlockNum < lastCommittedBlockNum; lastIndexedBlockNum++ {
+		blockNumToIndex := lastIndexedBlockNum + 1
+		err := indexer.fetchBlockFromDBAndCreateIndexes(blockNumToIndex)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// fetchBlockFromDBAndCreateIndexes re-reads a committed block, recomputes its hash, and
+// indexes it synchronously. Used while catching up on pending blocks at startup.
+func (indexer *blockchainIndexerAsync) fetchBlockFromDBAndCreateIndexes(blockNumber uint64) error {
+	blockchain := indexer.blockchain
+	blockToIndex, errBlockFetch := blockchain.getBlock(blockNumber)
+	if errBlockFetch != nil {
+		return errBlockFetch
+	}
+
+	blockHash, errBlockHash := blockToIndex.GetHash()
+	if errBlockHash != nil {
+		return errBlockHash
+	}
+	// Bug fix: the error from createIndexesInternal was previously discarded, so startup
+	// catch-up could silently skip a block whose indexing failed.
+	return indexer.createIndexesInternal(blockToIndex, blockNumber, blockHash)
+}
+
+// stop drains the indexer: it waits for indexing to catch up with committed blocks,
+// sends the stop sentinel, waits for the goroutine to echo it back, then closes the channel.
+// NOTE(review): the error returned by waitForLastCommittedBlock is ignored here.
+func (indexer *blockchainIndexerAsync) stop() {
+	indexer.indexerState.waitForLastCommittedBlock()
+	indexer.blockChan <- blockWrapper{nil, 0, nil, true}
+	<-indexer.blockChan
+	close(indexer.blockChan)
+}
+
+// Code related to tracking the block number that has been indexed
+// and if there has been an error in indexing a block
+// Since, we index blocks asynchronously, there may be a case when
+// a client query arrives before a block has been indexed.
+//
+// Do we really need strict semantics such that an index query results
+// should include up to block number (or higher) that may have been committed
+// when user query arrives?
+// If a delay of a couple of blocks are allowed, we can get rid of this synchronization stuff
+type blockchainIndexerState struct {
+	indexer *blockchainIndexerAsync
+
+	// zerothBlockIndexed disambiguates lastBlockIndexed == 0 ("block 0 indexed" vs "nothing indexed yet")
+	zerothBlockIndexed bool
+	lastBlockIndexed   uint64
+	// err, once set, poisons the index: further indexing stops and queries fail fast
+	err  error
+	lock *sync.RWMutex
+	// newBlockIndexed is Broadcast on every indexed block and on setError
+	newBlockIndexed *sync.Cond
+}
+
+func newBlockchainIndexerState(indexer *blockchainIndexerAsync) (*blockchainIndexerState, error) {
+ var lock sync.RWMutex
+ zerothBlockIndexed, lastIndexedBlockNum, err := fetchLastIndexedBlockNumFromDB()
+ if err != nil {
+ return nil, err
+ }
+ return &blockchainIndexerState{indexer, zerothBlockIndexed, lastIndexedBlockNum, nil, &lock, sync.NewCond(&lock)}, nil
+}
+
+// blockIndexed records progress after a block's index entries were committed and wakes
+// all readers blocked in waitForLastCommittedBlock.
+func (indexerState *blockchainIndexerState) blockIndexed(blockNumber uint64) {
+	indexerState.newBlockIndexed.L.Lock()
+	defer indexerState.newBlockIndexed.L.Unlock()
+	indexerState.lastBlockIndexed = blockNumber
+	indexerState.zerothBlockIndexed = true
+	indexerState.newBlockIndexed.Broadcast()
+}
+
+func (indexerState *blockchainIndexerState) getLastIndexedBlockNumber() uint64 {
+	indexerState.lock.RLock()
+	defer indexerState.lock.RUnlock()
+	return indexerState.lastBlockIndexed
+}
+
+func (indexerState *blockchainIndexerState) isZerothBlockIndexed() bool {
+	indexerState.lock.RLock()
+	defer indexerState.lock.RUnlock()
+	return indexerState.zerothBlockIndexed
+}
+
+// waitForLastCommittedBlock blocks the caller until the index has caught up with the
+// last committed block, or until an indexing error is recorded, and returns that error
+// (nil on success).
+func (indexerState *blockchainIndexerState) waitForLastCommittedBlock() error {
+	chain := indexerState.indexer.blockchain
+	indexerState.lock.Lock()
+	defer indexerState.lock.Unlock()
+	// Bug fix: the debug read of indexerState.err previously happened before taking the
+	// lock, racing with setError; it is now performed under the lock.
+	indexLogger.Debugf("waitForLastCommittedBlock() indexerState.err = %#v", indexerState.err)
+	if indexerState.err != nil {
+		return indexerState.err
+	}
+
+	if chain.getSize() == 0 {
+		return nil
+	}
+
+	lastBlockCommitted := chain.getSize() - 1
+
+	// Bug fix: sync.Cond.Wait must be re-checked in a loop — a wakeup does not guarantee
+	// the awaited condition holds (the previous 'if ... Wait()' could proceed spuriously).
+	for !indexerState.zerothBlockIndexed && indexerState.err == nil {
+		indexLogger.Debugf(
+			"Waiting for zeroth block to be indexed. lastBlockCommitted=[%d] and lastBlockIndexed=[%d]",
+			lastBlockCommitted, indexerState.lastBlockIndexed)
+		indexerState.newBlockIndexed.Wait()
+	}
+
+	for indexerState.lastBlockIndexed < lastBlockCommitted && indexerState.err == nil {
+		indexLogger.Debugf(
+			"Waiting for index to catch up with block chain. lastBlockCommitted=[%d] and lastBlockIndexed=[%d]",
+			lastBlockCommitted, indexerState.lastBlockIndexed)
+		indexerState.newBlockIndexed.Wait()
+	}
+	return indexerState.err
+}
+
+// setError poisons the indexer state and wakes all waiting readers so they observe the error.
+func (indexerState *blockchainIndexerState) setError(err error) {
+	indexerState.lock.Lock()
+	defer indexerState.lock.Unlock()
+	indexerState.err = err
+	indexLogger.Debugf("setError() indexerState.err = %#v", indexerState.err)
+	indexerState.newBlockIndexed.Broadcast()
+}
+
+func (indexerState *blockchainIndexerState) hasError() bool {
+	indexerState.lock.RLock()
+	defer indexerState.lock.RUnlock()
+	return indexerState.err != nil
+}
+
+func (indexerState *blockchainIndexerState) getError() error {
+	indexerState.lock.RLock()
+	defer indexerState.lock.RUnlock()
+	return indexerState.err
+}
+
+// checkError returns a descriptive error if a previous indexing failure has put the
+// index out of sync, or nil otherwise.
+func (indexerState *blockchainIndexerState) checkError() error {
+	indexerState.lock.RLock()
+	defer indexerState.lock.RUnlock()
+	if indexerState.err != nil {
+		// Bug fix: read lastBlockIndexed directly — calling getLastIndexedBlockNumber()
+		// here recursively acquires the read lock, which can deadlock if a writer is
+		// queued between the two RLock calls (see sync.RWMutex documentation).
+		return fmt.Errorf(
+			"An error had occurred during indexing block number [%d]. So, index is out of sync. Detail of the error = %s",
+			indexerState.lastBlockIndexed+1, indexerState.err)
+	}
+	return nil
+}
+
+// fetchLastIndexedBlockNumFromDB restores the indexing progress marker. When the marker
+// key is absent (fresh DB), it reports (false, 0, nil): nothing indexed yet.
+func fetchLastIndexedBlockNumFromDB() (zerothBlockIndexed bool, lastIndexedBlockNum uint64, err error) {
+	raw, err := db.GetDBHandle().GetFromIndexesCF(lastIndexedBlockKey)
+	if err != nil || raw == nil {
+		return false, 0, err
+	}
+	return true, decodeBlockNumber(raw), nil
+}
diff --git a/core/ledger/blockchain_indexes_async_test.go b/core/ledger/blockchain_indexes_async_test.go
new file mode 100644
index 00000000000..f80f85d1725
--- /dev/null
+++ b/core/ledger/blockchain_indexes_async_test.go
@@ -0,0 +1,202 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ledger
+
+import (
+ "errors"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+ "github.com/hyperledger/fabric/protos"
+ "github.com/tecbot/gorocksdb"
+)
+
+// The TestIndexesAsync_* wrappers below force the asynchronous indexer
+// (indexBlockDataSynchronously = false) and re-run the shared index test bodies
+// defined in blockchain_indexes_test.go.
+func TestIndexesAsync_GetBlockByBlockNumber(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = false
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+	testIndexesGetBlockByBlockNumber(t)
+}
+
+func TestIndexesAsync_GetBlockByBlockHash(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = false
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+	testIndexesGetBlockByBlockHash(t)
+}
+
+func TestIndexesAsync_GetBlockByBlockHashWrongHash(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = false
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+	testIndexesGetBlockByBlockHashWrongHash(t)
+}
+
+func TestIndexesAsync_GetTransactionByBlockNumberAndTxIndex(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = false
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+	testIndexesGetTransactionByBlockNumberAndTxIndex(t)
+}
+
+func TestIndexesAsync_GetTransactionByBlockHashAndTxIndex(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = false
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+	testIndexesGetTransactionByBlockHashAndTxIndex(t)
+}
+
+func TestIndexesAsync_GetTransactionByUUID(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = false
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+	testIndexesGetTransactionByUUID(t)
+}
+
+// TestIndexesAsync_IndexingErrorScenario verifies that once an indexing error is
+// recorded, subsequent index queries fail instead of silently serving a stale index.
+func TestIndexesAsync_IndexingErrorScenario(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = false
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+
+	testDBWrapper.CleanDB(t)
+	testBlockchainWrapper := newTestBlockchainWrapper(t)
+	chain := testBlockchainWrapper.blockchain
+	asyncIndexer, _ := chain.indexer.(*blockchainIndexerAsync)
+
+	defer func() {
+		// first stop and then set the error to nil.
+		// Otherwise stop may hang (waiting for catching up the index with the committing block)
+		testBlockchainWrapper.blockchain.indexer.stop()
+		asyncIndexer.indexerState.setError(nil)
+	}()
+
+	blocks, _, err := testBlockchainWrapper.populateBlockChainWithSampleData()
+	if err != nil {
+		t.Logf("Error populating block chain with sample data: %s", err)
+		t.Fail()
+	}
+
+	t.Log("Setting an error artificially so as to client query gets an error")
+	asyncIndexer.indexerState.setError(errors.New("Error created for testing"))
+
+	// populate more data after error
+	_, _, err = testBlockchainWrapper.populateBlockChainWithSampleData()
+	if err != nil {
+		t.Logf("Error populating block chain with sample data: %s", err)
+		t.Fail()
+	}
+	fmt.Println("Going to execute QUERY")
+	blockHash, _ := blocks[0].GetHash()
+	// index query should throw error
+
+	_, err = chain.getBlockByHash(blockHash)
+	fmt.Println("executed QUERY")
+	if err == nil {
+		t.Fatal("Error expected during execution of client query")
+	}
+}
+
+// TestIndexesAsync_ClientWaitScenario verifies that an index query blocks until the
+// indexer catches up with the committed chain, then succeeds.
+// NOTE(review): chain.size is mutated here without the chain's own synchronization —
+// a test-only trick to simulate "committed but not yet indexed"; confirm it is race-free.
+func TestIndexesAsync_ClientWaitScenario(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = false
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+
+	testDBWrapper.CleanDB(t)
+	testBlockchainWrapper := newTestBlockchainWrapper(t)
+	defer func() { testBlockchainWrapper.blockchain.indexer.stop() }()
+
+	chain := testBlockchainWrapper.blockchain
+	blocks, _, err := testBlockchainWrapper.populateBlockChainWithSampleData()
+	if err != nil {
+		t.Logf("Error populating block chain with sample data: %s", err)
+		t.Fail()
+	}
+	t.Log("Increasing size of blockchain by one artificially so as to make client wait")
+	chain.size = chain.size + 1
+	t.Log("Resetting size of blockchain to original and adding one block in a separate go routine so as to wake up the client")
+	go func() {
+		time.Sleep(2 * time.Second)
+		chain.size = chain.size - 1
+		blk, err := buildTestBlock(t)
+		if err != nil {
+			t.Logf("Error building test block: %s", err)
+			t.Fail()
+		}
+		testBlockchainWrapper.addNewBlock(blk, []byte("stateHash"))
+	}()
+	t.Log("Executing client query. The client would wait and will be woken up")
+	blockHash, _ := blocks[0].GetHash()
+	block := testBlockchainWrapper.getBlockByHash(blockHash)
+	testutil.AssertEquals(t, block, blocks[0])
+}
+
+// NoopIndexer satisfies blockchainIndexer but indexes nothing; tests use it to commit
+// blocks while leaving the index empty, so pending-block catch-up can be exercised.
+type NoopIndexer struct {
+}
+
+func (noop *NoopIndexer) isSynchronous() bool {
+	return true
+}
+func (noop *NoopIndexer) start(blockchain *blockchain) error {
+	return nil
+}
+func (noop *NoopIndexer) createIndexesSync(block *protos.Block, blockNumber uint64, blockHash []byte, writeBatch *gorocksdb.WriteBatch) error {
+	return nil
+}
+func (noop *NoopIndexer) createIndexesAsync(block *protos.Block, blockNumber uint64, blockHash []byte) error {
+	return nil
+}
+func (noop *NoopIndexer) fetchBlockNumberByBlockHash(blockHash []byte) (uint64, error) {
+	return 0, nil
+}
+func (noop *NoopIndexer) fetchTransactionIndexByUUID(txUUID string) (uint64, uint64, error) {
+	return 0, 0, nil
+}
+func (noop *NoopIndexer) stop() {
+}
+
+// TestIndexesAsync_IndexPendingBlocks verifies that a freshly started async indexer
+// catches up on blocks that were committed while indexing was disabled (NoopIndexer).
+func TestIndexesAsync_IndexPendingBlocks(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = false
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+
+	testDBWrapper.CleanDB(t)
+	testBlockchainWrapper := newTestBlockchainWrapper(t)
+
+	// stop the original indexer and change the indexer to Noop - so, no block is indexed
+	chain := testBlockchainWrapper.blockchain
+	chain.indexer.stop()
+	chain.indexer = &NoopIndexer{}
+	blocks, _, err := testBlockchainWrapper.populateBlockChainWithSampleData()
+	if err != nil {
+		t.Fatalf("Error populating block chain with sample data: %s", err)
+	}
+
+	// close the db and create new instance of blockchain (and the associated async indexer) - the indexer should index the pending blocks
+	testDBWrapper.CloseDB(t)
+	testBlockchainWrapper = newTestBlockchainWrapper(t)
+	// Bug fix: stop the NEW wrapper's async indexer. The old 'chain.indexer' is the
+	// NoopIndexer, so deferring on it leaked the freshly started indexing goroutine.
+	defer func() { testBlockchainWrapper.blockchain.indexer.stop() }()
+
+	blockHash, _ := blocks[0].GetHash()
+	block := testBlockchainWrapper.getBlockByHash(blockHash)
+	testutil.AssertEquals(t, block, blocks[0])
+
+	blockHash, _ = blocks[len(blocks)-1].GetHash()
+	block = testBlockchainWrapper.getBlockByHash(blockHash)
+	testutil.AssertEquals(t, block, blocks[len(blocks)-1])
+}
diff --git a/core/ledger/blockchain_indexes_test.go b/core/ledger/blockchain_indexes_test.go
new file mode 100644
index 00000000000..6e6016f241b
--- /dev/null
+++ b/core/ledger/blockchain_indexes_test.go
@@ -0,0 +1,161 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ledger
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+ "github.com/hyperledger/fabric/protos"
+)
+
+// The TestIndexes_* wrappers below force the synchronous indexer
+// (indexBlockDataSynchronously = true) and run the shared test bodies defined later
+// in this file.
+func TestIndexes_GetBlockByBlockNumber(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = true
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+	testIndexesGetBlockByBlockNumber(t)
+}
+
+func TestIndexes_GetBlockByBlockHash(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = true
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+	testIndexesGetBlockByBlockHash(t)
+}
+
+func TestIndexes_GetBlockByBlockHashWrongHash(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = true
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+	testIndexesGetBlockByBlockHashWrongHash(t)
+}
+
+func TestIndexes_GetTransactionByBlockNumberAndTxIndex(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = true
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+	testIndexesGetTransactionByBlockNumberAndTxIndex(t)
+}
+
+func TestIndexes_GetTransactionByBlockHashAndTxIndex(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = true
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+	testIndexesGetTransactionByBlockHashAndTxIndex(t)
+}
+
+func TestIndexes_GetTransactionByUUID(t *testing.T) {
+	defaultSetting := indexBlockDataSynchronously
+	indexBlockDataSynchronously = true
+	defer func() { indexBlockDataSynchronously = defaultSetting }()
+	testIndexesGetTransactionByUUID(t)
+}
+
+// Shared body: populate a sample chain and read every block back by its number.
+func testIndexesGetBlockByBlockNumber(t *testing.T) {
+	testDBWrapper.CleanDB(t)
+	testBlockchainWrapper := newTestBlockchainWrapper(t)
+	defer func() { testBlockchainWrapper.blockchain.indexer.stop() }()
+	blocks, _, err := testBlockchainWrapper.populateBlockChainWithSampleData()
+	if err != nil {
+		t.Logf("Error populating block chain with sample data: %s", err)
+		t.Fail()
+	}
+	for i := range blocks {
+		testutil.AssertEquals(t, testBlockchainWrapper.getBlock(uint64(i)), blocks[i])
+	}
+}
+
+// Shared body: populate a sample chain and read every block back by its hash.
+func testIndexesGetBlockByBlockHash(t *testing.T) {
+	testDBWrapper.CleanDB(t)
+	testBlockchainWrapper := newTestBlockchainWrapper(t)
+	defer func() { testBlockchainWrapper.blockchain.indexer.stop() }()
+	blocks, _, err := testBlockchainWrapper.populateBlockChainWithSampleData()
+	if err != nil {
+		t.Logf("Error populating block chain with sample data: %s", err)
+		t.Fail()
+	}
+	for i := range blocks {
+		blockHash, _ := blocks[i].GetHash()
+		testutil.AssertEquals(t, testBlockchainWrapper.getBlockByHash(blockHash), blocks[i])
+	}
+}
+
+// Shared body: a lookup by an unknown hash must yield a ledger error of type ErrorTypeBlockNotFound.
+func testIndexesGetBlockByBlockHashWrongHash(t *testing.T) {
+	testDBWrapper.CleanDB(t)
+	testBlockchainWrapper := newTestBlockchainWrapper(t)
+	defer func() { testBlockchainWrapper.blockchain.indexer.stop() }()
+	_, err := testBlockchainWrapper.blockchain.getBlockByHash([]byte("NotAnActualHash"))
+	ledgerErr, ok := err.(*Error)
+	if !(ok && ledgerErr.Type() == ErrorTypeBlockNotFound) {
+		t.Fatal("A 'LedgerError' of type 'ErrorTypeBlockNotFound' should have been thrown")
+	} else {
+		t.Logf("An expected error [%s] is received", err)
+	}
+}
+
+// Shared body: every transaction must be retrievable by (block number, tx index).
+func testIndexesGetTransactionByBlockNumberAndTxIndex(t *testing.T) {
+	testDBWrapper.CleanDB(t)
+	testBlockchainWrapper := newTestBlockchainWrapper(t)
+	defer func() { testBlockchainWrapper.blockchain.indexer.stop() }()
+	blocks, _, err := testBlockchainWrapper.populateBlockChainWithSampleData()
+	if err != nil {
+		t.Logf("Error populating block chain with sample data: %s", err)
+		t.Fail()
+	}
+	for i, block := range blocks {
+		for j, tx := range block.GetTransactions() {
+			testutil.AssertEquals(t, testBlockchainWrapper.getTransaction(uint64(i), uint64(j)), tx)
+		}
+	}
+}
+
+// Shared body: every transaction must be retrievable by (block hash, tx index).
+func testIndexesGetTransactionByBlockHashAndTxIndex(t *testing.T) {
+	testDBWrapper.CleanDB(t)
+	testBlockchainWrapper := newTestBlockchainWrapper(t)
+	defer func() { testBlockchainWrapper.blockchain.indexer.stop() }()
+	blocks, _, err := testBlockchainWrapper.populateBlockChainWithSampleData()
+	if err != nil {
+		t.Logf("Error populating block chain with sample data: %s", err)
+		t.Fail()
+	}
+	for _, block := range blocks {
+		blockHash, _ := block.GetHash()
+		for j, tx := range block.GetTransactions() {
+			testutil.AssertEquals(t, testBlockchainWrapper.getTransactionByBlockHash(blockHash, uint64(j)), tx)
+		}
+	}
+}
+
+// Shared body: every transaction must be retrievable by its UUID across multiple blocks.
+func testIndexesGetTransactionByUUID(t *testing.T) {
+	testDBWrapper.CleanDB(t)
+	testBlockchainWrapper := newTestBlockchainWrapper(t)
+	defer func() { testBlockchainWrapper.blockchain.indexer.stop() }()
+	tx1, uuid1 := buildTestTx(t)
+	tx2, uuid2 := buildTestTx(t)
+	block1 := protos.NewBlock([]*protos.Transaction{tx1, tx2}, nil)
+	testBlockchainWrapper.addNewBlock(block1, []byte("stateHash1"))
+
+	tx3, uuid3 := buildTestTx(t)
+	tx4, uuid4 := buildTestTx(t)
+	block2 := protos.NewBlock([]*protos.Transaction{tx3, tx4}, nil)
+	testBlockchainWrapper.addNewBlock(block2, []byte("stateHash2"))
+
+	testutil.AssertEquals(t, testBlockchainWrapper.getTransactionByUUID(uuid1), tx1)
+	testutil.AssertEquals(t, testBlockchainWrapper.getTransactionByUUID(uuid2), tx2)
+	testutil.AssertEquals(t, testBlockchainWrapper.getTransactionByUUID(uuid3), tx3)
+	testutil.AssertEquals(t, testBlockchainWrapper.getTransactionByUUID(uuid4), tx4)
+}
diff --git a/core/ledger/blockchain_test.go b/core/ledger/blockchain_test.go
new file mode 100644
index 00000000000..d3e17b0b970
--- /dev/null
+++ b/core/ledger/blockchain_test.go
@@ -0,0 +1,139 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ledger
+
+import (
+ "testing"
+ "time"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+ "github.com/hyperledger/fabric/core/util"
+ "github.com/hyperledger/fabric/protos"
+)
+
+func TestBlockchain_InfoNoBlock(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ blockchainTestWrapper := newTestBlockchainWrapper(t)
+ blockchain := blockchainTestWrapper.blockchain
+ blockchainInfo, err := blockchain.getBlockchainInfo()
+ testutil.AssertNoError(t, err, "Error while invoking getBlockchainInfo() on an emply blockchain")
+ testutil.AssertEquals(t, blockchainInfo.Height, uint64(0))
+ testutil.AssertEquals(t, blockchainInfo.CurrentBlockHash, nil)
+ testutil.AssertEquals(t, blockchainInfo.PreviousBlockHash, nil)
+}
+
+func TestBlockchain_Info(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ blockchainTestWrapper := newTestBlockchainWrapper(t)
+ blocks, _, _ := blockchainTestWrapper.populateBlockChainWithSampleData()
+
+ blockchain := blockchainTestWrapper.blockchain
+ blockchainInfo, _ := blockchain.getBlockchainInfo()
+ testutil.AssertEquals(t, blockchainInfo.Height, uint64(3))
+ currentBlockHash, _ := blocks[len(blocks)-1].GetHash()
+ previousBlockHash, _ := blocks[len(blocks)-2].GetHash()
+ testutil.AssertEquals(t, blockchainInfo.CurrentBlockHash, currentBlockHash)
+ testutil.AssertEquals(t, blockchainInfo.PreviousBlockHash, previousBlockHash)
+}
+
+func TestBlockChain_SingleBlock(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ blockchainTestWrapper := newTestBlockchainWrapper(t)
+ blockchain := blockchainTestWrapper.blockchain
+
+ // Create the Chaincode specification
+ chaincodeSpec := &protos.ChaincodeSpec{Type: protos.ChaincodeSpec_GOLANG,
+ ChaincodeID: &protos.ChaincodeID{Path: "Contracts"},
+ CtorMsg: &protos.ChaincodeInput{Function: "Initialize", Args: []string{"param1"}}}
+ chaincodeDeploymentSepc := &protos.ChaincodeDeploymentSpec{ChaincodeSpec: chaincodeSpec}
+ uuid := testutil.GenerateUUID(t)
+ newChaincodeTx, err := protos.NewChaincodeDeployTransaction(chaincodeDeploymentSepc, uuid)
+ testutil.AssertNoError(t, err, "Failed to create new chaincode Deployment Transaction")
+ t.Logf("New chaincode tx: %v", newChaincodeTx)
+
+ block1 := protos.NewBlock([]*protos.Transaction{newChaincodeTx}, nil)
+ blockNumber := blockchainTestWrapper.addNewBlock(block1, []byte("stateHash1"))
+ t.Logf("New chain: %v", blockchain)
+ testutil.AssertEquals(t, blockNumber, uint64(0))
+ testutil.AssertEquals(t, blockchain.getSize(), uint64(1))
+ testutil.AssertEquals(t, blockchainTestWrapper.fetchBlockchainSizeFromDB(), uint64(1))
+}
+
+func TestBlockChain_SimpleChain(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ blockchainTestWrapper := newTestBlockchainWrapper(t)
+ blockchain := blockchainTestWrapper.blockchain
+ allBlocks, allStateHashes, err := blockchainTestWrapper.populateBlockChainWithSampleData()
+ if err != nil {
+ t.Logf("Error populating block chain with sample data: %s", err)
+ t.Fail()
+ }
+ testutil.AssertEquals(t, blockchain.getSize(), uint64(len(allBlocks)))
+ testutil.AssertEquals(t, blockchainTestWrapper.fetchBlockchainSizeFromDB(), uint64(len(allBlocks)))
+
+ for i := range allStateHashes {
+ t.Logf("Checking state hash for block number = [%d]", i)
+ testutil.AssertEquals(t, blockchainTestWrapper.getBlock(uint64(i)).GetStateHash(), allStateHashes[i])
+ }
+
+ for i := range allBlocks {
+ t.Logf("Checking block hash for block number = [%d]", i)
+ blockhash, _ := blockchainTestWrapper.getBlock(uint64(i)).GetHash()
+ expectedBlockHash, _ := allBlocks[i].GetHash()
+ testutil.AssertEquals(t, blockhash, expectedBlockHash)
+ }
+
+ testutil.AssertNil(t, blockchainTestWrapper.getBlock(uint64(0)).PreviousBlockHash)
+
+ i := 1
+ for i < len(allBlocks) {
+ t.Logf("Checking previous block hash for block number = [%d]", i)
+ expectedPreviousBlockHash, _ := allBlocks[i-1].GetHash()
+ testutil.AssertEquals(t, blockchainTestWrapper.getBlock(uint64(i)).PreviousBlockHash, expectedPreviousBlockHash)
+ i++
+ }
+}
+
+func TestBlockChainEmptyChain(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ blockchainTestWrapper := newTestBlockchainWrapper(t)
+ testutil.AssertEquals(t, blockchainTestWrapper.blockchain.getSize(), uint64(0))
+ block := blockchainTestWrapper.getLastBlock()
+ if block != nil {
+ t.Fatalf("Get last block on an empty chain should return nil.")
+ }
+ t.Logf("last block = [%s]", block)
+}
+
+func TestBlockchainBlockLedgerCommitTimestamp(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ blockchainTestWrapper := newTestBlockchainWrapper(t)
+ block1 := protos.NewBlock(nil, nil)
+ startTime := util.CreateUtcTimestamp()
+ time.Sleep(2 * time.Second)
+ blockchainTestWrapper.addNewBlock(block1, []byte("stateHash1"))
+ lastBlock := blockchainTestWrapper.getLastBlock()
+ if lastBlock.NonHashData == nil {
+ t.Fatal("Expected block to have non-hash-data, but it was nil")
+ }
+ if lastBlock.NonHashData.LocalLedgerCommitTimestamp == nil {
+ t.Fatal("Expected block to have non-hash-data timestamp, but it was nil")
+ }
+ if startTime.Seconds >= lastBlock.NonHashData.LocalLedgerCommitTimestamp.Seconds {
+ t.Fatal("Expected block time to be after start time")
+ }
+}
diff --git a/core/ledger/genesis/config.go b/core/ledger/genesis/config.go
new file mode 100644
index 00000000000..f425c4dd221
--- /dev/null
+++ b/core/ledger/genesis/config.go
@@ -0,0 +1,42 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genesis
+
+import (
+ "sync"
+
+ "github.com/spf13/viper"
+)
+
+var loadConfigOnce sync.Once
+
+var genesis map[string]interface{}
+
+func initConfigs() {
+ loadConfigOnce.Do(func() { loadConfigs() })
+}
+
+func loadConfigs() {
+ genesisLogger.Info("Loading configurations...")
+ genesis = viper.GetStringMap("ledger.blockchain.genesisBlock")
+ genesisLogger.Info("Configurations loaded: genesis=%s", genesis)
+}
+
+func getGenesis() map[string]interface{} {
+ initConfigs()
+ return genesis
+}
diff --git a/core/ledger/genesis/genesis.go b/core/ledger/genesis/genesis.go
new file mode 100644
index 00000000000..a376bb33c27
--- /dev/null
+++ b/core/ledger/genesis/genesis.go
@@ -0,0 +1,49 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genesis
+
+import (
+ "sync"
+
+ "github.com/hyperledger/fabric/core/ledger"
+ "github.com/op/go-logging"
+)
+
+var genesisLogger = logging.MustGetLogger("genesis")
+
+var makeGenesisError error
+var once sync.Once
+
+// MakeGenesis creates the genesis block based on configuration in core.yaml
+// and adds it to the blockchain.
+func MakeGenesis() error {
+ once.Do(func() {
+ ledger, err := ledger.GetLedger()
+ if err != nil {
+ makeGenesisError = err
+ return
+ }
+
+ if ledger.GetBlockchainSize() == 0 {
+ genesisLogger.Info("Creating genesis block.")
+ if makeGenesisError = ledger.BeginTxBatch(0); makeGenesisError == nil {
+ makeGenesisError = ledger.CommitTxBatch(0, nil, nil, nil)
+ }
+ }
+ })
+ return makeGenesisError
+}
diff --git a/core/ledger/genesis/genesis_test.go b/core/ledger/genesis/genesis_test.go
new file mode 100644
index 00000000000..0dce53d1223
--- /dev/null
+++ b/core/ledger/genesis/genesis_test.go
@@ -0,0 +1,96 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genesis
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "testing"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
+
+ "github.com/hyperledger/fabric/core/chaincode"
+ "github.com/hyperledger/fabric/core/ledger"
+ "github.com/hyperledger/fabric/protos"
+ "github.com/spf13/viper"
+)
+
+func TestMain(m *testing.M) {
+ setupTestConfig()
+ os.Exit(m.Run())
+}
+
+func TestGenesis(t *testing.T) {
+
+ //use a different address than what we usually use for "peer"
+ //we override the peerAddress set in chaincode_support.go
+ peerAddress := "0.0.0.0:50303"
+
+ lis, err := net.Listen("tcp", peerAddress)
+ if err != nil {
+ t.Fail()
+ t.Logf("Error starting peer listener %s", err)
+ return
+ }
+
+ var opts []grpc.ServerOption
+ if viper.GetBool("peer.tls.enabled") {
+ creds, err := credentials.NewServerTLSFromFile(viper.GetString("peer.tls.cert.file"), viper.GetString("peer.tls.key.file"))
+ if err != nil {
+ grpclog.Fatalf("Failed to generate credentials %v", err)
+ }
+ opts = []grpc.ServerOption{grpc.Creds(creds)}
+ }
+ grpcServer := grpc.NewServer(opts...)
+
+ getPeerEndpoint := func() (*protos.PeerEndpoint, error) {
+ return &protos.PeerEndpoint{ID: &protos.PeerID{Name: "testpeer"}, Address: peerAddress}, nil
+ }
+
+ ccStartupTimeout := time.Duration(30000) * time.Millisecond
+ protos.RegisterChaincodeSupportServer(grpcServer, chaincode.NewChaincodeSupport(chaincode.DefaultChain, getPeerEndpoint, false, ccStartupTimeout, nil))
+
+ go grpcServer.Serve(lis)
+
+ ledger := ledger.InitTestLedger(t)
+
+ if ledger.GetBlockchainSize() != 0 {
+ t.Fatalf("Expected blockchain size of 0, but got %d", ledger.GetBlockchainSize())
+ }
+
+ makeGenesisErr := MakeGenesis()
+ if makeGenesisErr != nil {
+ t.Fatalf("Error creating genesis block, %s", makeGenesisErr)
+ }
+ if ledger.GetBlockchainSize() != 1 {
+ t.Fatalf("Expected blockchain size of 1, but got %d", ledger.GetBlockchainSize())
+ }
+}
+
+func setupTestConfig() {
+ viper.AddConfigPath(".")
+ viper.SetConfigName("genesis_test")
+ viper.Set("ledger.blockchain.deploy-system-chaincode", "false")
+ err := viper.ReadInConfig()
+ if err != nil { // Handle errors reading the config file
+ panic(fmt.Errorf("Fatal error config file: %s \n", err))
+ }
+}
diff --git a/core/ledger/genesis/genesis_test.yaml b/core/ledger/genesis/genesis_test.yaml
new file mode 100644
index 00000000000..c9449032e59
--- /dev/null
+++ b/core/ledger/genesis/genesis_test.yaml
@@ -0,0 +1,234 @@
+---
+###############################################################################
+#
+# CLI section
+#
+###############################################################################
+cli:
+
+ # The address that the cli process will use for callbacks from chaincodes
+ address: 0.0.0.0:30304
+
+
+
+###############################################################################
+#
+# REST section
+#
+###############################################################################
+rest:
+
+ # The address that the REST service will listen on for incoming requests.
+ address: 0.0.0.0:5000
+
+
+
+###############################################################################
+#
+# Peer section
+#
+###############################################################################
+peer:
+
+ # Peeer Version following version semantics as described here http://semver.org/
+ # The Peer supplies this version in communications with other Peers
+ version: 0.1.0
+
+ # The Peer id is used for identifying this Peer instance.
+ id: jdoe
+
+ # The privateKey to be used by this peer
+ privateKey: 794ef087680e2494fa4918fd8fb80fb284b50b57d321a31423fe42b9ccf6216047cea0b66fe8365a8e3f2a8140c6866cc45852e63124668bee1daa9c97da0c2a
+
+ # The networkId allows for logical seperation of networks
+ # networkId: dev
+ # networkId: test
+ networkId: dev
+
+ Dockerfile: |
+ from hyperledger/fabric-baseimage
+ # Copy GOPATH src and install Peer
+ COPY src $GOPATH/src
+ RUN mkdir -p /var/hyperledger/db
+ WORKDIR $GOPATH/src/github.com/hyperledger/fabric/peer/
+ RUN CGO_CFLAGS=" " CGO_LDFLAGS="-lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy" go install && cp $GOPATH/src/github.com/hyperledger/fabric/peer/core.yaml $GOPATH/bin
+
+ # The Address this Peer will bind to for providing services
+ address: 0.0.0.0:30303
+ # Whether the Peer should programmatically determine the address to bind to. This case is useful for docker containers.
+ addressAutoDetect: false
+
+
+ # Logging settings
+ logging:
+ # Logging level, can be one of [error|warning|info|debug]
+ # One of: CRITICAL | ERROR | WARNING | NOTICE | INFO | DEBUG
+ level: DEBUG
+
+ # Peer port to accept connections on
+ port: 30303
+ # Peer's setting for GOMAXPROCS
+ gomaxprocs: 2
+ workers: 2
+
+ # Validator defines whether this peer is a validating peer or not, and if
+ # it is enabled, what consensus plugin to load
+ validator:
+ enabled: true
+ # Consensus plugin to use. The value is the name of the plugin; ie bpft, noops
+ consensus: noops
+
+ # TLS Settings for p2p communications
+ tls:
+ enabled: false
+ cert:
+ file: testdata/server1.pem
+ key:
+ file: testdata/server1.key
+ # The server name use to verify the hostname returned by TLS handshake
+ serverhostoverride:
+
+ # Peer discovery settings. Controls how this peer discovers other peers
+ discovery:
+
+ # The root nodes are used for bootstrapping purposes, and generally supplied through ENV variables
+ rootnode:
+
+ # The duration of time between attempts to asks peers for their connected peers
+ period: 5s
+
+ ## leaving this in for example of sub map entry
+ # testNodes:
+ # - node : 1
+ # ip : 127.0.0.1
+ # port : 30303
+ # - node : 2
+ # ip : 127.0.0.1
+ # port : 30303
+
+ # Should the discovered nodes and their reputations
+ # be stored in DB and persisted between restarts
+ persist: true
+
+ # if peer discovery is off
+ # the peer window will show
+ # only what retrieved by active
+ # peer [true/false]
+ enabled: true
+
+ # number of workers that
+ # tastes the peers for being
+ # online [1..10]
+ workers: 8
+
+ # the period in seconds with which the discovery
+ # tries to reconnect to successful nodes
+ # 0 means the nodes are not reconnected
+ touchPeriod: 600
+
+ # the maximum nuber of nodes to reconnect to
+ # -1 for unlimited
+ touchMaxNodes: 100
+
+ # Path on the file system where peer will store data
+ fileSystemPath: /var/hyperledger/test/genesis_test
+
+### NOTE: The validator section below is not needed and will be removed - BN
+###############################################################################
+#
+# Validator section
+#
+###############################################################################
+validator:
+ enabled: false
+ address: 0.0.0.0:30304
+ # TLS Settings for p2p communications
+ tls:
+ enabled: false
+ cert:
+ file: testdata/server1.pem
+ key:
+ file: testdata/server1.key
+ # The server name use to verify the hostname returned by TLS handshake
+ serverhostoverride:
+ # Peer discovery settings. Controls how this peer discovers other peers
+ discovery:
+
+ # The root nodes are used for bootstrapping purposes, and generally supplied through ENV variables
+ rootnode:
+
+###############################################################################
+#
+# VM section
+#
+###############################################################################
+vm:
+
+ # Endpoint of the vm management system. For docker can be one of the following in general
+ # unix:///var/run/docker.sock
+ # http://localhost:2375
+ endpoint: unix:///var/run/docker.sock
+
+
+###############################################################################
+#
+# Chaincode section
+#
+###############################################################################
+chaincode:
+
+ # The id is used by the Chaincode stub to register the executing ChaincodeID with the Peerand is generally supplied through ENV variables
+ id:
+ url:
+ version:
+
+ golang:
+
+ # This is the basis for the Golang Dockerfile. Additional commands will be appended depedendent upon the chaincode specification.
+ Dockerfile: |
+ from hyperledger/fabric-baseimage
+ COPY src $GOPATH/src
+ WORKDIR $GOPATH
+
+ #timeout for starting up a container and waiting for Register to come through
+ startuptimeout: 20000
+
+ #mode - options are "dev", "net"
+ #dev - in dev mode, user runs the chaincode after starting validator from command line on local machine
+ #net - in net mode validator will run chaincode in a docker container
+
+ mode: net
+
+ installpath: /opt/gopath/bin/
+
+###############################################################################
+#
+# Ledger section - ledger configuration encompasses both the blockchain
+# and the state
+#
+###############################################################################
+ledger:
+
+ blockchain:
+
+ # Define the genesis block
+ genesisBlock:
+
+ # Deploy chaincodes into the genesis block
+ chaincode:
+ path: github.com/hyperledger/fabric/core/example/chaincode/chaincode_example01
+ type: GOLANG
+ constructor:
+ func: init
+ args:
+ - alice
+ - "4"
+ - bob
+ - "10"
+
+ state:
+
+ # Control the number state deltas that are maintained. This takes additional
+ # disk space, but allow the state to be rolled backwards and forwards
+ # without the need to replay transactions.
+ deltaHistorySize: 500
diff --git a/core/ledger/ledger.go b/core/ledger/ledger.go
new file mode 100644
index 00000000000..14ff6bb4cc6
--- /dev/null
+++ b/core/ledger/ledger.go
@@ -0,0 +1,493 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ledger
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt/state"
+ "github.com/hyperledger/fabric/events/producer"
+ "github.com/op/go-logging"
+ "github.com/tecbot/gorocksdb"
+
+ "github.com/hyperledger/fabric/protos"
+ "golang.org/x/net/context"
+)
+
+var ledgerLogger = logging.MustGetLogger("ledger")
+
+//ErrorType represents the type of a ledger error
+type ErrorType string
+
+const (
+ //ErrorTypeInvalidArgument used to indicate the invalid input to ledger method
+ ErrorTypeInvalidArgument = ErrorType("InvalidArgument")
+ //ErrorTypeOutOfBounds used to indicate that a request is out of bounds
+ ErrorTypeOutOfBounds = ErrorType("OutOfBounds")
+ //ErrorTypeResourceNotFound used to indicate if a resource is not found
+ ErrorTypeResourceNotFound = ErrorType("ResourceNotFound")
+ //ErrorTypeBlockNotFound used to indicate if a block is not found when looked up by it's hash
+ ErrorTypeBlockNotFound = ErrorType("ErrorTypeBlockNotFound")
+)
+
+//Error can be used for throwing an error from ledger code.
+type Error struct {
+ errType ErrorType
+ msg string
+}
+
+func (ledgerError *Error) Error() string {
+ return fmt.Sprintf("LedgerError - %s: %s", ledgerError.errType, ledgerError.msg)
+}
+
+//Type returns the type of the error
+func (ledgerError *Error) Type() ErrorType {
+ return ledgerError.errType
+}
+
+func newLedgerError(errType ErrorType, msg string) *Error {
+ return &Error{errType, msg}
+}
+
+var (
+ // ErrOutOfBounds is returned if a request is out of bounds
+ ErrOutOfBounds = newLedgerError(ErrorTypeOutOfBounds, "ledger: out of bounds")
+
+ // ErrResourceNotFound is returned if a resource is not found
+ ErrResourceNotFound = newLedgerError(ErrorTypeResourceNotFound, "ledger: resource not found")
+)
+
+// Ledger - the struct for openchain ledger
+type Ledger struct {
+ blockchain *blockchain
+ state *state.State
+ currentID interface{}
+}
+
+var ledger *Ledger
+var ledgerError error
+var once sync.Once
+
+// GetLedger - gives a reference to a 'singleton' ledger
+func GetLedger() (*Ledger, error) {
+ once.Do(func() {
+ ledger, ledgerError = GetNewLedger()
+ })
+ return ledger, ledgerError
+}
+
+// GetNewLedger - gives a reference to a new ledger TODO need better approach
+func GetNewLedger() (*Ledger, error) {
+ blockchain, err := newBlockchain()
+ if err != nil {
+ return nil, err
+ }
+
+ state := state.NewState()
+ return &Ledger{blockchain, state, nil}, nil
+}
+
+/////////////////// Transaction-batch related methods ///////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////
+
+// BeginTxBatch - gets invoked when next round of transaction-batch execution begins
+func (ledger *Ledger) BeginTxBatch(id interface{}) error {
+ err := ledger.checkValidIDBegin()
+ if err != nil {
+ return err
+ }
+ ledger.currentID = id
+ return nil
+}
+
+// GetTXBatchPreviewBlockInfo returns a preview block info that will
+// contain the same information as GetBlockchainInfo will return after
+// ledger.CommitTxBatch is called with the same parameters. If the
+// state is modified by a transaction between these two calls, the
+// contained hash will be different.
+func (ledger *Ledger) GetTXBatchPreviewBlockInfo(id interface{},
+ transactions []*protos.Transaction, metadata []byte) (*protos.BlockchainInfo, error) {
+ err := ledger.checkValidIDCommitORRollback(id)
+ if err != nil {
+ return nil, err
+ }
+ stateHash, err := ledger.state.GetHash()
+ if err != nil {
+ return nil, err
+ }
+ block := ledger.blockchain.buildBlock(protos.NewBlock(transactions, metadata), stateHash)
+ info := ledger.blockchain.getBlockchainInfoForBlock(ledger.blockchain.getSize()+1, block)
+ return info, nil
+}
+
+// CommitTxBatch - gets invoked when the current transaction-batch needs to be committed
+// This function returns successfully iff the transactions details and state changes (that
+// may have happened during execution of this transaction-batch) have been committed to permanent storage
+func (ledger *Ledger) CommitTxBatch(id interface{}, transactions []*protos.Transaction, transactionResults []*protos.TransactionResult, metadata []byte) error {
+ err := ledger.checkValidIDCommitORRollback(id)
+ if err != nil {
+ return err
+ }
+
+ stateHash, err := ledger.state.GetHash()
+ if err != nil {
+ ledger.resetForNextTxGroup(false)
+ ledger.blockchain.blockPersistenceStatus(false)
+ return err
+ }
+
+ writeBatch := gorocksdb.NewWriteBatch()
+ defer writeBatch.Destroy()
+ block := protos.NewBlock(transactions, metadata)
+ block.NonHashData = &protos.NonHashData{}
+ newBlockNumber, err := ledger.blockchain.addPersistenceChangesForNewBlock(context.TODO(), block, stateHash, writeBatch)
+ if err != nil {
+ ledger.resetForNextTxGroup(false)
+ ledger.blockchain.blockPersistenceStatus(false)
+ return err
+ }
+ ledger.state.AddChangesForPersistence(newBlockNumber, writeBatch)
+ opt := gorocksdb.NewDefaultWriteOptions()
+ defer opt.Destroy()
+ dbErr := db.GetDBHandle().DB.Write(opt, writeBatch)
+ if dbErr != nil {
+ ledger.resetForNextTxGroup(false)
+ ledger.blockchain.blockPersistenceStatus(false)
+ return dbErr
+ }
+
+ ledger.resetForNextTxGroup(true)
+ ledger.blockchain.blockPersistenceStatus(true)
+
+ sendProducerBlockEvent(block)
+ if len(transactionResults) != 0 {
+ ledgerLogger.Debug("There were some erroneous transactions. We need to send a 'TX rejected' message here.")
+ }
+ return nil
+}
+
+// RollbackTxBatch - Discards all the state changes that may have taken place during the execution of
+// current transaction-batch
+func (ledger *Ledger) RollbackTxBatch(id interface{}) error {
+ ledgerLogger.Debugf("RollbackTxBatch for id = [%s]", id)
+ err := ledger.checkValidIDCommitORRollback(id)
+ if err != nil {
+ return err
+ }
+ ledger.resetForNextTxGroup(false)
+ return nil
+}
+
+// TxBegin - Marks the begin of a new transaction in the ongoing batch
+func (ledger *Ledger) TxBegin(txUUID string) {
+ ledger.state.TxBegin(txUUID)
+}
+
+// TxFinished - Marks the finish of the on-going transaction.
+// If txSuccessful is false, the state changes made by the transaction are discarded
+func (ledger *Ledger) TxFinished(txUUID string, txSuccessful bool) {
+ ledger.state.TxFinish(txUUID, txSuccessful)
+}
+
+/////////////////// world-state related methods /////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////
+
+// GetTempStateHash - Computes state hash by taking into account the state changes that may have taken
+// place during the execution of current transaction-batch
+func (ledger *Ledger) GetTempStateHash() ([]byte, error) {
+ return ledger.state.GetHash()
+}
+
+// GetTempStateHashWithTxDeltaStateHashes - In addition to the state hash (as defined in method GetTempStateHash),
+// this method returns a map [txUuid of Tx --> cryptoHash(stateChangesMadeByTx)]
+// Only successful txs appear in this map
+func (ledger *Ledger) GetTempStateHashWithTxDeltaStateHashes() ([]byte, map[string][]byte, error) {
+ stateHash, err := ledger.state.GetHash()
+ return stateHash, ledger.state.GetTxStateDeltaHash(), err
+}
+
+// GetState gets the state for chaincodeID and key. If committed is false, this first looks in memory
+// and if missing, pulls from db. If committed is true, this pulls from the db only.
+func (ledger *Ledger) GetState(chaincodeID string, key string, committed bool) ([]byte, error) {
+ return ledger.state.Get(chaincodeID, key, committed)
+}
+
+// GetStateRangeScanIterator returns an iterator to get all the keys (and values) between startKey and endKey
+// (assuming lexical order of the keys) for a chaincodeID.
+// If committed is true, the key-values are retrieved only from the db. If committed is false, the results from db
+// are merged with the results in memory (giving preference to in-memory data)
+// The key-values in the returned iterator are not guaranteed to be in any specific order
+func (ledger *Ledger) GetStateRangeScanIterator(chaincodeID string, startKey string, endKey string, committed bool) (statemgmt.RangeScanIterator, error) {
+ return ledger.state.GetRangeScanIterator(chaincodeID, startKey, endKey, committed)
+}
+
+// SetState sets state to given value for chaincodeID and key. Does not immediately write to DB
+func (ledger *Ledger) SetState(chaincodeID string, key string, value []byte) error {
+ if key == "" || value == nil {
+ return newLedgerError(ErrorTypeInvalidArgument,
+ fmt.Sprintf("An empty string key or a nil value is not supported. Method invoked with key='%s', value='%#v'", key, value))
+ }
+ return ledger.state.Set(chaincodeID, key, value)
+}
+
+// DeleteState tracks the deletion of state for chaincodeID and key. Does not immediately write to DB
+func (ledger *Ledger) DeleteState(chaincodeID string, key string) error {
+ return ledger.state.Delete(chaincodeID, key)
+}
+
+// CopyState copies all the key-values from sourceChaincodeID to destChaincodeID
+func (ledger *Ledger) CopyState(sourceChaincodeID string, destChaincodeID string) error {
+ return ledger.state.CopyState(sourceChaincodeID, destChaincodeID)
+}
+
+// GetStateMultipleKeys returns the values for the multiple keys.
+// This method is mainly to amortize the cost of grpc communication between chaincode shim and peer
+func (ledger *Ledger) GetStateMultipleKeys(chaincodeID string, keys []string, committed bool) ([][]byte, error) {
+ return ledger.state.GetMultipleKeys(chaincodeID, keys, committed)
+}
+
+// SetStateMultipleKeys sets the values for the multiple keys.
+// This method is mainly to amortize the cost of grpc communication between chaincode shim and peer
+func (ledger *Ledger) SetStateMultipleKeys(chaincodeID string, kvs map[string][]byte) error {
+ return ledger.state.SetMultipleKeys(chaincodeID, kvs)
+}
+
+// GetStateSnapshot returns a point-in-time view of the global state for the current block. This
+// should be used when transferring the state from one peer to another peer. You must call
+// stateSnapshot.Release() once you are done with the snapshot to free up resources.
+func (ledger *Ledger) GetStateSnapshot() (*state.StateSnapshot, error) {
+ dbSnapshot := db.GetDBHandle().GetSnapshot()
+ blockHeight, err := fetchBlockchainSizeFromSnapshot(dbSnapshot)
+ if err != nil {
+ dbSnapshot.Release()
+ return nil, err
+ }
+ if 0 == blockHeight {
+ dbSnapshot.Release()
+ return nil, fmt.Errorf("Blockchain has no blocks, cannot determine block number")
+ }
+ return ledger.state.GetSnapshot(blockHeight-1, dbSnapshot)
+}
+
+// GetStateDelta will return the state delta for the specified block if
+// available. If not available because it has been discarded, returns nil,nil.
+func (ledger *Ledger) GetStateDelta(blockNumber uint64) (*statemgmt.StateDelta, error) {
+ if blockNumber >= ledger.GetBlockchainSize() {
+ return nil, ErrOutOfBounds
+ }
+ return ledger.state.FetchStateDeltaFromDB(blockNumber)
+}
+
+// ApplyStateDelta applies a state delta to the current state. This is an
+// in memory change only. You must call ledger.CommitStateDelta to persist
+// the change to the DB.
+// This should only be used as part of state synchronization. State deltas
+// can be retrieved from another peer through the Ledger.GetStateDelta function
+// or by creating state deltas with keys retrieved from
+// Ledger.GetStateSnapshot(). For an example, see TestSetRawState in
+// ledger_test.go
+// Note that there is no order checking in this function and it is up to
+// the caller to ensure that deltas are applied in the correct order.
+// For example, if you are currently at block 8 and call this function
+// with a delta retrieved from Ledger.GetStateDelta(10), you would now
+// be in a bad state because you did not apply the delta for block 9.
+// It's possible to roll the state forwards or backwards using
+// stateDelta.RollBackwards. By default, a delta retrieved for block 3 can
+// be used to roll forwards from state at block 2 to state at block 3. If
+// stateDelta.RollBackwards=false, the delta retrieved for block 3 can be
+// used to roll backwards from the state at block 3 to the state at block 2.
+func (ledger *Ledger) ApplyStateDelta(id interface{}, delta *statemgmt.StateDelta) error {
+ err := ledger.checkValidIDBegin()
+ if err != nil {
+ return err
+ }
+ ledger.currentID = id
+ ledger.state.ApplyStateDelta(delta)
+ return nil
+}
+
+// CommitStateDelta will commit the state delta passed to ledger.ApplyStateDelta
+// to the DB
+func (ledger *Ledger) CommitStateDelta(id interface{}) error {
+	// id must match the one previously passed to ApplyStateDelta.
+	err := ledger.checkValidIDCommitORRollback(id)
+	if err != nil {
+		return err
+	}
+	// Clear the in-memory changes (marked committed) after the DB write returns.
+	defer ledger.resetForNextTxGroup(true)
+	return ledger.state.CommitStateDelta()
+}
+
+// RollbackStateDelta will discard the state delta passed
+// to ledger.ApplyStateDelta
+func (ledger *Ledger) RollbackStateDelta(id interface{}) error {
+	// id must match the one previously passed to ApplyStateDelta.
+	err := ledger.checkValidIDCommitORRollback(id)
+	if err != nil {
+		return err
+	}
+	// Drop the in-memory changes without persisting them (txCommitted=false).
+	ledger.resetForNextTxGroup(false)
+	return nil
+}
+
+// DeleteALLStateKeysAndValues deletes all keys and values from the state.
+// This is generally only used during state synchronization when creating a
+// new state from a snapshot.
+// NOTE(review): the "ALL" capitalization is unconventional but is part of the
+// exported API, so it is kept for compatibility with existing callers.
+func (ledger *Ledger) DeleteALLStateKeysAndValues() error {
+	return ledger.state.DeleteState()
+}
+
+/////////////////// blockchain related methods /////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////
+
+// GetBlockchainInfo returns information about the blockchain ledger such as
+// height, current block hash, and previous block hash. It simply delegates
+// to the underlying blockchain implementation.
+func (ledger *Ledger) GetBlockchainInfo() (*protos.BlockchainInfo, error) {
+	return ledger.blockchain.getBlockchainInfo()
+}
+
+// GetBlockByNumber return block given the number of the block on blockchain.
+// Lowest block on chain is block number zero. Returns ErrOutOfBounds when
+// blockNumber is at or beyond the current chain size.
+func (ledger *Ledger) GetBlockByNumber(blockNumber uint64) (*protos.Block, error) {
+	if blockNumber >= ledger.GetBlockchainSize() {
+		return nil, ErrOutOfBounds
+	}
+	return ledger.blockchain.getBlock(blockNumber)
+}
+
+// GetBlockchainSize returns number of blocks in blockchain (i.e. the chain
+// height; the highest block number is GetBlockchainSize()-1).
+func (ledger *Ledger) GetBlockchainSize() uint64 {
+	return ledger.blockchain.getSize()
+}
+
+// GetTransactionByUUID return transaction by its uuid, looked up via the
+// blockchain's transaction index.
+func (ledger *Ledger) GetTransactionByUUID(txUUID string) (*protos.Transaction, error) {
+	return ledger.blockchain.getTransactionByUUID(txUUID)
+}
+
+// PutRawBlock puts a raw block on the chain. This function should only be
+// used for synchronization between peers. On success a block event is
+// published to subscribed consumers.
+func (ledger *Ledger) PutRawBlock(block *protos.Block, blockNumber uint64) error {
+	err := ledger.blockchain.persistRawBlock(block, blockNumber)
+	if err != nil {
+		return err
+	}
+	// Notify event consumers only after the block has been persisted.
+	sendProducerBlockEvent(block)
+	return nil
+}
+
+// VerifyChain will verify the integrity of the blockchain. This is accomplished
+// by ensuring that the previous block hash stored in each block matches
+// the actual hash of the previous block in the chain. The return value is the
+// block number of the lowest block in the range which can be verified as valid.
+// The first block is assumed to be valid, and an error is only returned if the
+// first block does not exist, or some other sort of irrecoverable ledger error
+// such as the first block failing to hash is encountered.
+// For example, if VerifyChain(99, 0) is called and the previous hash values
+// stored in blocks 8, 32, and 42 do not match the actual hashes of the
+// respective previous blocks, then 42 would be the return value from this
+// function.
+// highBlock is the high block in the chain to include in verification. If you
+// wish to verify the entire chain, use ledger.GetBlockchainSize() - 1.
+// lowBlock is the low block in the chain to include in verification. If
+// you wish to verify the entire chain, use 0 for the genesis block.
+func (ledger *Ledger) VerifyChain(highBlock, lowBlock uint64) (uint64, error) {
+	// Both bounds must lie within the chain and be correctly ordered.
+	if highBlock >= ledger.GetBlockchainSize() {
+		return highBlock, ErrOutOfBounds
+	}
+	if highBlock < lowBlock {
+		return lowBlock, ErrOutOfBounds
+	}
+
+	currentBlock, err := ledger.GetBlockByNumber(highBlock)
+	if err != nil {
+		return highBlock, fmt.Errorf("Error fetching block %d.", highBlock)
+	}
+	if currentBlock == nil {
+		return highBlock, fmt.Errorf("Block %d is nil.", highBlock)
+	}
+
+	// Walk the chain downwards. Per the documented contract, any failure to
+	// fetch or hash a previous block, or a previous-hash mismatch, is not an
+	// error: block i is simply the lowest block that could be verified.
+	for i := highBlock; i > lowBlock; i-- {
+		previousBlock, err := ledger.GetBlockByNumber(i - 1)
+		if err != nil {
+			return i, nil
+		}
+		if previousBlock == nil {
+			return i, nil
+		}
+		previousBlockHash, err := previousBlock.GetHash()
+		if err != nil {
+			return i, nil
+		}
+		if !bytes.Equal(previousBlockHash, currentBlock.PreviousBlockHash) {
+			return i, nil
+		}
+		currentBlock = previousBlock
+	}
+
+	return lowBlock, nil
+}
+
+// checkValidIDBegin returns an error if a TxGroup (or state delta) is already
+// in progress; a new one may only begin when ledger.currentID is nil.
+func (ledger *Ledger) checkValidIDBegin() error {
+	if ledger.currentID != nil {
+		// Use %v: currentID is an interface{} and need not implement
+		// fmt.Stringer, for which %s would render as %!s(...).
+		return fmt.Errorf("Another TxGroup [%v] already in-progress", ledger.currentID)
+	}
+	return nil
+}
+
+// checkValidIDCommitORRollback returns an error unless id deep-equals the id
+// of the TxGroup/delta currently in progress (set by BeginTxBatch or
+// ApplyStateDelta).
+func (ledger *Ledger) checkValidIDCommitORRollback(id interface{}) error {
+	if !reflect.DeepEqual(ledger.currentID, id) {
+		// Use %v: currentID is an interface{} and need not implement
+		// fmt.Stringer, for which %s would render as %!s(...).
+		return fmt.Errorf("Another TxGroup [%v] already in-progress", ledger.currentID)
+	}
+	return nil
+}
+
+// resetForNextTxGroup clears the in-progress TxGroup id and the state's
+// in-memory changes. txCommited (sic) indicates whether those changes were
+// persisted (true for commit, false for rollback).
+func (ledger *Ledger) resetForNextTxGroup(txCommited bool) {
+	ledgerLogger.Debug("resetting ledger state for next transaction batch")
+	ledger.currentID = nil
+	ledger.state.ClearInMemoryChanges(txCommited)
+}
+
+// sendProducerBlockEvent publishes a block event for the given block, first
+// stripping the chaincode code package from deploy-transaction payloads so
+// the event stays lightweight.
+// NOTE(review): the transactions are mutated in place, so the caller's block
+// loses the deploy code packages as a side effect.
+func sendProducerBlockEvent(block *protos.Block) {
+
+	// Remove payload from deploy transactions. This is done to make block
+	// events more lightweight as the payload for these types of transactions
+	// can be very large.
+	blockTransactions := block.GetTransactions()
+	for _, transaction := range blockTransactions {
+		if transaction.Type == protos.Transaction_CHAINCODE_DEPLOY {
+			deploymentSpec := &protos.ChaincodeDeploymentSpec{}
+			err := proto.Unmarshal(transaction.Payload, deploymentSpec)
+			if err != nil {
+				// Best effort: log and keep the original payload.
+				ledgerLogger.Errorf("Error unmarshalling deployment transaction for block event: %s", err)
+				continue
+			}
+			deploymentSpec.CodePackage = nil
+			deploymentSpecBytes, err := proto.Marshal(deploymentSpec)
+			if err != nil {
+				ledgerLogger.Errorf("Error marshalling deployment transaction for block event: %s", err)
+				continue
+			}
+			transaction.Payload = deploymentSpecBytes
+		}
+	}
+
+	producer.Send(producer.CreateBlockEvent(block))
+}
diff --git a/core/ledger/ledger_test.go b/core/ledger/ledger_test.go
new file mode 100644
index 00000000000..c4e014e966f
--- /dev/null
+++ b/core/ledger/ledger_test.go
@@ -0,0 +1,918 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ledger
+
+import (
+ "bytes"
+ "strconv"
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+ "github.com/hyperledger/fabric/protos"
+)
+
+// TestLedgerCommit verifies that state set within a committed tx batch is
+// visible both in memory and from the DB.
+func TestLedgerCommit(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+	ledger.BeginTxBatch(1)
+	ledger.TxBegin("txUuid")
+	ledger.SetState("chaincode1", "key1", []byte("value1"))
+	ledger.SetState("chaincode2", "key2", []byte("value2"))
+	ledger.SetState("chaincode3", "key3", []byte("value3"))
+	ledger.TxFinished("txUuid", true)
+	transaction, _ := buildTestTx(t)
+	ledger.CommitTxBatch(1, []*protos.Transaction{transaction}, nil, []byte("proof"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", false), []byte("value1"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1"))
+}
+
+// TestLedgerRollback verifies that rolling back a tx batch discards all state
+// written within it.
+func TestLedgerRollback(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+	ledger.BeginTxBatch(1)
+	ledger.TxBegin("txUuid")
+	ledger.SetState("chaincode1", "key1", []byte("value1"))
+	ledger.SetState("chaincode2", "key2", []byte("value2"))
+	ledger.SetState("chaincode3", "key3", []byte("value3"))
+	ledger.TxFinished("txUuid", true)
+	ledger.RollbackTxBatch(1)
+	testutil.AssertNil(t, ledgerTestWrapper.GetState("chaincode1", "key1", false))
+}
+
+// TestLedgerRollbackWithHash verifies that the temp state hash diverges while
+// a batch has uncommitted writes and returns to its prior value after rollback.
+func TestLedgerRollbackWithHash(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+
+	ledger.BeginTxBatch(0)
+	ledger.TxBegin("txUuid")
+	ledger.SetState("chaincode0", "key1", []byte("value1"))
+	ledger.SetState("chaincode0", "key2", []byte("value2"))
+	ledger.SetState("chaincode0", "key3", []byte("value3"))
+	ledger.TxFinished("txUuid", true)
+	ledger.RollbackTxBatch(0)
+
+	hash0 := ledgerTestWrapper.GetTempStateHash()
+
+	ledger.BeginTxBatch(1)
+	ledger.TxBegin("txUuid")
+	ledger.SetState("chaincode1", "key1", []byte("value1"))
+	ledger.SetState("chaincode2", "key2", []byte("value2"))
+	ledger.SetState("chaincode3", "key3", []byte("value3"))
+	ledger.TxFinished("txUuid", true)
+
+	hash1 := ledgerTestWrapper.GetTempStateHash()
+	testutil.AssertNotEquals(t, hash1, hash0)
+
+	// After rollback the hash must match the pre-batch hash again.
+	ledger.RollbackTxBatch(1)
+	hash1 = ledgerTestWrapper.GetTempStateHash()
+	testutil.AssertEquals(t, hash1, hash0)
+	testutil.AssertNil(t, ledgerTestWrapper.GetState("chaincode1", "key1", false))
+}
+
+// TestLedgerDifferentID verifies that committing with a batch ID different
+// from the one passed to BeginTxBatch fails.
+func TestLedgerDifferentID(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+	ledger.BeginTxBatch(1)
+	ledger.TxBegin("txUuid")
+	ledger.SetState("chaincode1", "key1", []byte("value1"))
+	ledger.SetState("chaincode2", "key2", []byte("value2"))
+	ledger.SetState("chaincode3", "key3", []byte("value3"))
+	ledger.TxFinished("txUuid", true)
+	transaction, _ := buildTestTx(t)
+	err := ledger.CommitTxBatch(2, []*protos.Transaction{transaction}, nil, []byte("prrof"))
+	testutil.AssertError(t, err, "ledger should throw error for wrong batch ID")
+}
+
+// TestLedgerGetTempStateHashWithTxDeltaStateHashes verifies per-tx delta
+// hashes: successful txs with writes get a hash, empty txs get nil, failed
+// txs get no entry, and the map is reset for each new batch.
+func TestLedgerGetTempStateHashWithTxDeltaStateHashes(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+	ledger.BeginTxBatch(1)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1"))
+	ledger.TxFinished("txUuid1", true)
+
+	ledger.TxBegin("txUuid2")
+	ledger.SetState("chaincode2", "key2", []byte("value2"))
+	ledger.TxFinished("txUuid2", true)
+
+	ledger.TxBegin("txUuid3")
+	ledger.TxFinished("txUuid3", true)
+
+	ledger.TxBegin("txUuid4")
+	ledger.SetState("chaincode4", "key4", []byte("value4"))
+	ledger.TxFinished("txUuid4", false)
+
+	_, txDeltaHashes, _ := ledger.GetTempStateHashWithTxDeltaStateHashes()
+	testutil.AssertEquals(t, testutil.ComputeCryptoHash([]byte("chaincode1key1value1")), txDeltaHashes["txUuid1"])
+	testutil.AssertEquals(t, testutil.ComputeCryptoHash([]byte("chaincode2key2value2")), txDeltaHashes["txUuid2"])
+	testutil.AssertNil(t, txDeltaHashes["txUuid3"])
+	_, ok := txDeltaHashes["txUuid4"]
+	if ok {
+		t.Fatalf("Entry for a failed Tx should not be present in txDeltaHashes map")
+	}
+	ledger.CommitTxBatch(1, []*protos.Transaction{}, nil, []byte("proof"))
+
+	ledger.BeginTxBatch(2)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1"))
+	ledger.TxFinished("txUuid1", true)
+	_, txDeltaHashes, _ = ledger.GetTempStateHashWithTxDeltaStateHashes()
+	if len(txDeltaHashes) != 1 {
+		t.Fatalf("Entries in txDeltaHashes map should only be from current batch")
+	}
+}
+
+// TestLedgerStateSnapshot verifies that a state snapshot is isolated from
+// subsequent commits and reports the block number it was taken at.
+func TestLedgerStateSnapshot(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+	ledger.BeginTxBatch(1)
+	ledger.TxBegin("txUuid")
+	ledger.SetState("chaincode1", "key1", []byte("value1"))
+	ledger.SetState("chaincode2", "key2", []byte("value2"))
+	ledger.SetState("chaincode3", "key3", []byte("value3"))
+	ledger.TxFinished("txUuid", true)
+	transaction, _ := buildTestTx(t)
+	ledger.CommitTxBatch(1, []*protos.Transaction{transaction}, nil, []byte("proof"))
+
+	snapshot, err := ledger.GetStateSnapshot()
+
+	if err != nil {
+		t.Fatalf("Error fetching snapshot %s", err)
+	}
+	defer snapshot.Release()
+
+	// Modify keys to ensure they do not impact the snapshot
+	ledger.BeginTxBatch(2)
+	ledger.TxBegin("txUuid")
+	ledger.DeleteState("chaincode1", "key1")
+	ledger.SetState("chaincode4", "key4", []byte("value4"))
+	ledger.SetState("chaincode5", "key5", []byte("value5"))
+	ledger.SetState("chaincode6", "key6", []byte("value6"))
+	ledger.TxFinished("txUuid", true)
+	transaction, _ = buildTestTx(t)
+	ledger.CommitTxBatch(2, []*protos.Transaction{transaction}, nil, []byte("proof"))
+
+	// Only the 3 keys present at snapshot time should be visible.
+	var count = 0
+	for snapshot.Next() {
+		k, v := snapshot.GetRawKeyValue()
+		t.Logf("Key %v, Val %v", k, v)
+		count++
+	}
+	if count != 3 {
+		t.Fatalf("Expected 3 keys, but got %d", count)
+	}
+
+	if snapshot.GetBlockNumber() != 0 {
+		t.Fatalf("Expected blocknumber to be 0, but got %d", snapshot.GetBlockNumber())
+	}
+
+}
+
+// TestLedgerPutRawBlock verifies that a raw block can be stored at an
+// arbitrary height, that a subsequent commit chains onto its hash, and that
+// unfilled gaps read back as nil.
+func TestLedgerPutRawBlock(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+	block := new(protos.Block)
+	block.PreviousBlockHash = []byte("foo")
+	block.StateHash = []byte("bar")
+	ledger.PutRawBlock(block, 4)
+	testutil.AssertEquals(t, ledgerTestWrapper.GetBlockByNumber(4), block)
+
+	ledger.BeginTxBatch(1)
+	ledger.TxBegin("txUuid")
+	ledger.SetState("chaincode1", "key1", []byte("value1"))
+	ledger.TxFinished("txUuid", true)
+	transaction, _ := buildTestTx(t)
+	ledger.CommitTxBatch(1, []*protos.Transaction{transaction}, nil, []byte("proof"))
+
+	previousHash, _ := block.GetHash()
+	newBlock := ledgerTestWrapper.GetBlockByNumber(5)
+
+	if !bytes.Equal(newBlock.PreviousBlockHash, previousHash) {
+		t.Fatalf("Expected new block to properly set its previous hash")
+	}
+
+	// Assert that a non-existent block is nil
+	testutil.AssertNil(t, ledgerTestWrapper.GetBlockByNumber(2))
+}
+
+// TestLedgerSetRawState verifies full state reconstruction: commit values,
+// snapshot, delete the values, then restore them from the snapshot via a
+// state delta, and confirm the state hash returns to its original value.
+func TestLedgerSetRawState(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+	ledger.BeginTxBatch(1)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1"))
+	ledger.SetState("chaincode2", "key2", []byte("value2"))
+	ledger.SetState("chaincode3", "key3", []byte("value3"))
+	ledger.TxFinished("txUuid1", true)
+	transaction, _ := buildTestTx(t)
+	ledger.CommitTxBatch(1, []*protos.Transaction{transaction}, nil, []byte("proof"))
+
+	// Ensure values are in the DB
+	val := ledgerTestWrapper.GetState("chaincode1", "key1", true)
+	if bytes.Compare(val, []byte("value1")) != 0 {
+		t.Fatalf("Expected initial chaincode1 key1 to be %s, but got %s", []byte("value1"), val)
+	}
+	val = ledgerTestWrapper.GetState("chaincode2", "key2", true)
+	if bytes.Compare(val, []byte("value2")) != 0 {
+		t.Fatalf("Expected initial chaincode1 key2 to be %s, but got %s", []byte("value2"), val)
+	}
+	val = ledgerTestWrapper.GetState("chaincode3", "key3", true)
+	if bytes.Compare(val, []byte("value3")) != 0 {
+		t.Fatalf("Expected initial chaincode1 key3 to be %s, but got %s", []byte("value3"), val)
+	}
+
+	hash1, hash1Err := ledger.GetTempStateHash()
+	if hash1Err != nil {
+		t.Fatalf("Error getting hash1 %s", hash1Err)
+	}
+
+	snapshot, snapshotError := ledger.GetStateSnapshot()
+	if snapshotError != nil {
+		t.Fatalf("Error fetching snapshot %s", snapshotError)
+	}
+	defer snapshot.Release()
+
+	// Delete keys
+	ledger.BeginTxBatch(2)
+	ledger.TxBegin("txUuid2")
+	ledger.DeleteState("chaincode1", "key1")
+	ledger.DeleteState("chaincode2", "key2")
+	ledger.DeleteState("chaincode3", "key3")
+	ledger.TxFinished("txUuid2", true)
+	transaction, _ = buildTestTx(t)
+	ledger.CommitTxBatch(2, []*protos.Transaction{transaction}, nil, []byte("proof"))
+
+	// ensure keys are deleted
+	val = ledgerTestWrapper.GetState("chaincode1", "key1", true)
+	if val != nil {
+		t.Fatalf("Expected chaincode1 key1 to be nil, but got %s", val)
+	}
+	val = ledgerTestWrapper.GetState("chaincode2", "key2", true)
+	if val != nil {
+		t.Fatalf("Expected chaincode2 key2 to be nil, but got %s", val)
+	}
+	val = ledgerTestWrapper.GetState("chaincode3", "key3", true)
+	if val != nil {
+		t.Fatalf("Expected chaincode3 key3 to be nil, but got %s", val)
+	}
+
+	hash2, hash2Err := ledger.GetTempStateHash()
+	if hash2Err != nil {
+		t.Fatalf("Error getting hash2 %s", hash2Err)
+	}
+
+	if bytes.Compare(hash1, hash2) == 0 {
+		t.Fatalf("Expected hashes to not match, but they both equal %s", hash1)
+	}
+
+	// put key/values from the snapshot back in the DB
+	//var keys, values [][]byte
+	delta := statemgmt.NewStateDelta()
+	for i := 0; snapshot.Next(); i++ {
+		k, v := snapshot.GetRawKeyValue()
+		cID, keyID := statemgmt.DecodeCompositeKey(k)
+		delta.Set(cID, keyID, v, nil)
+	}
+
+	ledgerTestWrapper.ApplyStateDelta(1, delta)
+	ledgerTestWrapper.CommitStateDelta(1)
+
+	// Ensure values are back in the DB
+	val = ledgerTestWrapper.GetState("chaincode1", "key1", true)
+	if bytes.Compare(val, []byte("value1")) != 0 {
+		t.Fatalf("Expected chaincode1 key1 to be %s, but got %s", []byte("value1"), val)
+	}
+	val = ledgerTestWrapper.GetState("chaincode2", "key2", true)
+	if bytes.Compare(val, []byte("value2")) != 0 {
+		t.Fatalf("Expected chaincode1 key2 to be %s, but got %s", []byte("value2"), val)
+	}
+	val = ledgerTestWrapper.GetState("chaincode3", "key3", true)
+	if bytes.Compare(val, []byte("value3")) != 0 {
+		t.Fatalf("Expected chaincode1 key3 to be %s, but got %s", []byte("value3"), val)
+	}
+
+	// Restored state must hash identically to the pre-delete state.
+	hash3, hash3Err := ledger.GetTempStateHash()
+	if hash3Err != nil {
+		t.Fatalf("Error getting hash3 %s", hash3Err)
+	}
+	if bytes.Compare(hash1, hash3) != 0 {
+		t.Fatalf("Expected hashes to be equal, but they are %s and %s", hash1, hash3)
+	}
+}
+
+// TestDeleteAllStateKeysAndValues verifies that DeleteALLStateKeysAndValues
+// wipes the state and that the ledger remains usable afterwards.
+func TestDeleteAllStateKeysAndValues(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+	ledger.BeginTxBatch(1)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1"))
+	ledger.SetState("chaincode2", "key2", []byte("value2"))
+	ledger.SetState("chaincode3", "key3", []byte("value3"))
+	ledger.TxFinished("txUuid1", true)
+	transaction, _ := buildTestTx(t)
+	ledger.CommitTxBatch(1, []*protos.Transaction{transaction}, nil, []byte("proof"))
+
+	// Confirm values are present in state
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode2", "key2", true), []byte("value2"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode3", "key3", true), []byte("value3"))
+
+	// Delete all keys/values
+	err := ledger.DeleteALLStateKeysAndValues()
+	if err != nil {
+		t.Fatalf("Error calling deleting all keys/values from state: %s", err)
+	}
+
+	// Confirm values are deleted
+	testutil.AssertNil(t, ledgerTestWrapper.GetState("chaincode1", "key1", true))
+	testutil.AssertNil(t, ledgerTestWrapper.GetState("chaincode2", "key2", true))
+	testutil.AssertNil(t, ledgerTestWrapper.GetState("chaincode3", "key3", true))
+
+	// Test that we can now store new stuff in the state
+	ledger.BeginTxBatch(2)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1"))
+	ledger.SetState("chaincode2", "key2", []byte("value2"))
+	ledger.SetState("chaincode3", "key3", []byte("value3"))
+	ledger.TxFinished("txUuid1", true)
+	transaction, _ = buildTestTx(t)
+	ledger.CommitTxBatch(2, []*protos.Transaction{transaction}, nil, []byte("proof"))
+
+	// Confirm values are present in state
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode2", "key2", true), []byte("value2"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode3", "key3", true), []byte("value3"))
+}
+
+// TestVerifyChain builds a 100-block chain, verifies it over all
+// (highBlock, lowBlock) ranges, then corrupts one block at a time and checks
+// that VerifyChain reports the expected lowest verifiable block, plus the
+// out-of-bounds edge cases.
+func TestVerifyChain(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+
+	// Build a big blockchain
+	for i := 0; i < 100; i++ {
+		ledger.BeginTxBatch(i)
+		ledger.TxBegin("txUuid" + strconv.Itoa(i))
+		ledger.SetState("chaincode"+strconv.Itoa(i), "key"+strconv.Itoa(i), []byte("value"+strconv.Itoa(i)))
+		ledger.TxFinished("txUuid"+strconv.Itoa(i), true)
+		transaction, _ := buildTestTx(t)
+		ledger.CommitTxBatch(i, []*protos.Transaction{transaction}, nil, []byte("proof"))
+	}
+
+	// Verify the chain
+	for lowBlock := uint64(0); lowBlock < ledger.GetBlockchainSize()-1; lowBlock++ {
+		testutil.AssertEquals(t, ledgerTestWrapper.VerifyChain(ledger.GetBlockchainSize()-1, lowBlock), lowBlock)
+	}
+	for highBlock := ledger.GetBlockchainSize() - 1; highBlock > 0; highBlock-- {
+		testutil.AssertEquals(t, ledgerTestWrapper.VerifyChain(highBlock, 0), uint64(0))
+	}
+
+	// Add bad blocks and test
+	badBlock := protos.NewBlock(nil, nil)
+	badBlock.PreviousBlockHash = []byte("evil")
+	for i := uint64(0); i < ledger.GetBlockchainSize(); i++ {
+		// Swap in the bad block at position i, then restore the good one below.
+		goodBlock := ledgerTestWrapper.GetBlockByNumber(i)
+		ledger.PutRawBlock(badBlock, i)
+		for lowBlock := uint64(0); lowBlock < ledger.GetBlockchainSize()-1; lowBlock++ {
+			if i == ledger.GetBlockchainSize()-1 {
+				testutil.AssertEquals(t, ledgerTestWrapper.VerifyChain(ledger.GetBlockchainSize()-1, lowBlock), uint64(i))
+			} else if i >= lowBlock {
+				testutil.AssertEquals(t, ledgerTestWrapper.VerifyChain(ledger.GetBlockchainSize()-1, lowBlock), uint64(i+1))
+			} else {
+				testutil.AssertEquals(t, ledgerTestWrapper.VerifyChain(ledger.GetBlockchainSize()-1, lowBlock), lowBlock)
+			}
+		}
+		// highBlock counts down to 0; loop ends when the uint64 wraps around.
+		for highBlock := ledger.GetBlockchainSize() - 1; highBlock != ^uint64(0); highBlock-- {
+			if i == highBlock {
+				testutil.AssertEquals(t, ledgerTestWrapper.VerifyChain(highBlock, 0), uint64(i))
+			} else if i < highBlock {
+				testutil.AssertEquals(t, ledgerTestWrapper.VerifyChain(highBlock, 0), uint64(i+1))
+			} else {
+				testutil.AssertEquals(t, ledgerTestWrapper.VerifyChain(highBlock, 0), uint64(0))
+			}
+		}
+		ledgerTestWrapper.PutRawBlock(goodBlock, i)
+	}
+
+	// Test edge cases
+	_, err := ledger.VerifyChain(2, 10)
+	testutil.AssertError(t, err, "Expected error as high block is less than low block")
+	_, err = ledger.VerifyChain(0, 100)
+	testutil.AssertError(t, err, "Expected error as high block is out of bounds")
+}
+
+// TestBlockNumberOutOfBoundsError verifies that GetBlockByNumber and
+// GetStateDelta return ErrOutOfBounds for block numbers at/above chain size.
+func TestBlockNumberOutOfBoundsError(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+
+	// Build a big blockchain
+	for i := 0; i < 10; i++ {
+		ledger.BeginTxBatch(i)
+		ledger.TxBegin("txUuid" + strconv.Itoa(i))
+		ledger.SetState("chaincode"+strconv.Itoa(i), "key"+strconv.Itoa(i), []byte("value"+strconv.Itoa(i)))
+		ledger.TxFinished("txUuid"+strconv.Itoa(i), true)
+		transaction, _ := buildTestTx(t)
+		ledger.CommitTxBatch(i, []*protos.Transaction{transaction}, nil, []byte("proof"))
+	}
+
+	// Block 9 is the highest valid block; 10 must be out of bounds.
+	ledgerTestWrapper.GetBlockByNumber(9)
+	_, err := ledger.GetBlockByNumber(10)
+	testutil.AssertEquals(t, err, ErrOutOfBounds)
+
+	ledgerTestWrapper.GetStateDelta(9)
+	_, err = ledger.GetStateDelta(10)
+	testutil.AssertEquals(t, err, ErrOutOfBounds)
+
+}
+
+// TestRollBackwardsAndForwards commits three blocks of state, then uses
+// state deltas with RollBackwards toggled to walk the state backwards and
+// forwards between the block states, asserting the visible values each time.
+func TestRollBackwardsAndForwards(t *testing.T) {
+
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+
+	// Block 0
+	ledger.BeginTxBatch(0)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1A"))
+	ledger.SetState("chaincode2", "key2", []byte("value2A"))
+	ledger.SetState("chaincode3", "key3", []byte("value3A"))
+	ledger.TxFinished("txUuid1", true)
+	transaction, _ := buildTestTx(t)
+	ledger.CommitTxBatch(0, []*protos.Transaction{transaction}, nil, []byte("proof"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1A"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode2", "key2", true), []byte("value2A"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode3", "key3", true), []byte("value3A"))
+
+	// Block 1
+	ledger.BeginTxBatch(1)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1B"))
+	ledger.SetState("chaincode2", "key2", []byte("value2B"))
+	ledger.SetState("chaincode3", "key3", []byte("value3B"))
+	ledger.TxFinished("txUuid1", true)
+	transaction, _ = buildTestTx(t)
+	ledger.CommitTxBatch(1, []*protos.Transaction{transaction}, nil, []byte("proof"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1B"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode2", "key2", true), []byte("value2B"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode3", "key3", true), []byte("value3B"))
+
+	// Block 2
+	ledger.BeginTxBatch(2)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1C"))
+	ledger.SetState("chaincode2", "key2", []byte("value2C"))
+	ledger.SetState("chaincode3", "key3", []byte("value3C"))
+	ledger.SetState("chaincode4", "key4", []byte("value4C"))
+	ledger.TxFinished("txUuid1", true)
+	transaction, _ = buildTestTx(t)
+	ledger.CommitTxBatch(2, []*protos.Transaction{transaction}, nil, []byte("proof"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1C"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode2", "key2", true), []byte("value2C"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode3", "key3", true), []byte("value3C"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode4", "key4", true), []byte("value4C"))
+
+	// Roll backwards once
+	delta2 := ledgerTestWrapper.GetStateDelta(2)
+	delta2.RollBackwards = true
+	ledgerTestWrapper.ApplyStateDelta(1, delta2)
+	ledgerTestWrapper.CommitStateDelta(1)
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1B"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode2", "key2", true), []byte("value2B"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode3", "key3", true), []byte("value3B"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode4", "key4", true), nil)
+
+	// Now roll forwards once
+	delta2.RollBackwards = false
+	ledgerTestWrapper.ApplyStateDelta(2, delta2)
+	ledgerTestWrapper.CommitStateDelta(2)
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1C"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode2", "key2", true), []byte("value2C"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode3", "key3", true), []byte("value3C"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode4", "key4", true), []byte("value4C"))
+
+	// Now roll backwards twice
+	delta2.RollBackwards = true
+	delta1 := ledgerTestWrapper.GetStateDelta(1)
+	ledgerTestWrapper.ApplyStateDelta(3, delta2)
+	ledgerTestWrapper.CommitStateDelta(3)
+
+	delta1.RollBackwards = true
+	ledgerTestWrapper.ApplyStateDelta(4, delta1)
+	ledgerTestWrapper.CommitStateDelta(4)
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1A"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode2", "key2", true), []byte("value2A"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode3", "key3", true), []byte("value3A"))
+
+	// Now roll forwards twice
+	delta2.RollBackwards = false
+	delta1.RollBackwards = false
+	ledgerTestWrapper.ApplyStateDelta(5, delta1)
+	ledgerTestWrapper.CommitStateDelta(5)
+	ledgerTestWrapper.ApplyStateDelta(6, delta2)
+	ledgerTestWrapper.CommitStateDelta(6)
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1C"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode2", "key2", true), []byte("value2C"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode3", "key3", true), []byte("value3C"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode4", "key4", true), []byte("value4C"))
+}
+
+// TestInvalidOrderDelta verifies the TxGroup id checks: committing or rolling
+// back with an id that was never applied, and applying a second delta while
+// one is in progress, must all fail.
+func TestInvalidOrderDelta(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+
+	// Block 0
+	ledger.BeginTxBatch(0)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1A"))
+	ledger.SetState("chaincode2", "key2", []byte("value2A"))
+	ledger.SetState("chaincode3", "key3", []byte("value3A"))
+	ledger.TxFinished("txUuid1", true)
+	transaction, _ := buildTestTx(t)
+	ledger.CommitTxBatch(0, []*protos.Transaction{transaction}, nil, []byte("proof"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1A"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode2", "key2", true), []byte("value2A"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode3", "key3", true), []byte("value3A"))
+
+	// Block 1
+	ledger.BeginTxBatch(1)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1B"))
+	ledger.SetState("chaincode2", "key2", []byte("value2B"))
+	ledger.SetState("chaincode3", "key3", []byte("value3B"))
+	ledger.TxFinished("txUuid1", true)
+	transaction, _ = buildTestTx(t)
+	ledger.CommitTxBatch(1, []*protos.Transaction{transaction}, nil, []byte("proof"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1B"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode2", "key2", true), []byte("value2B"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode3", "key3", true), []byte("value3B"))
+
+	delta := ledgerTestWrapper.GetStateDelta(1)
+
+	// No delta with id 1 has been applied, so commit/rollback must fail.
+	err := ledger.CommitStateDelta(1)
+	testutil.AssertError(t, err, "Expected error commiting delta")
+
+	err = ledger.RollbackTxBatch(1)
+	testutil.AssertError(t, err, "Expected error rolling back delta")
+
+	ledgerTestWrapper.ApplyStateDelta(2, delta)
+
+	// A delta with id 2 is now in progress; id 3 operations must fail.
+	err = ledger.ApplyStateDelta(3, delta)
+	testutil.AssertError(t, err, "Expected error applying delta")
+
+	err = ledger.CommitStateDelta(3)
+	testutil.AssertError(t, err, "Expected error applying delta")
+
+	err = ledger.RollbackStateDelta(3)
+	testutil.AssertError(t, err, "Expected error applying delta")
+
+}
+
+// TestApplyDeltaHash verifies that applying/committing/rolling back state
+// deltas moves the temp state hash exactly between the known block-1 and
+// block-2 hashes.
+func TestApplyDeltaHash(t *testing.T) {
+
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+
+	// Block 0
+	ledger.BeginTxBatch(0)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1A"))
+	ledger.SetState("chaincode2", "key2", []byte("value2A"))
+	ledger.SetState("chaincode3", "key3", []byte("value3A"))
+	ledger.TxFinished("txUuid1", true)
+	transaction, _ := buildTestTx(t)
+	ledger.CommitTxBatch(0, []*protos.Transaction{transaction}, nil, []byte("proof"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1A"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode2", "key2", true), []byte("value2A"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode3", "key3", true), []byte("value3A"))
+
+	// Block 1
+	ledger.BeginTxBatch(1)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1B"))
+	ledger.SetState("chaincode2", "key2", []byte("value2B"))
+	ledger.SetState("chaincode3", "key3", []byte("value3B"))
+	ledger.TxFinished("txUuid1", true)
+	transaction, _ = buildTestTx(t)
+	ledger.CommitTxBatch(1, []*protos.Transaction{transaction}, nil, []byte("proof"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1B"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode2", "key2", true), []byte("value2B"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode3", "key3", true), []byte("value3B"))
+
+	// Block 2
+	ledger.BeginTxBatch(2)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1C"))
+	ledger.SetState("chaincode2", "key2", []byte("value2C"))
+	ledger.SetState("chaincode3", "key3", []byte("value3C"))
+	ledger.SetState("chaincode4", "key4", []byte("value4C"))
+	ledger.TxFinished("txUuid1", true)
+	transaction, _ = buildTestTx(t)
+	ledger.CommitTxBatch(2, []*protos.Transaction{transaction}, nil, []byte("proof"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode1", "key1", true), []byte("value1C"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode2", "key2", true), []byte("value2C"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode3", "key3", true), []byte("value3C"))
+	testutil.AssertEquals(t, ledgerTestWrapper.GetState("chaincode4", "key4", true), []byte("value4C"))
+
+	hash2 := ledgerTestWrapper.GetTempStateHash()
+
+	// Roll backwards once
+	delta2 := ledgerTestWrapper.GetStateDelta(2)
+	delta2.RollBackwards = true
+	ledgerTestWrapper.ApplyStateDelta(1, delta2)
+
+	// The hash must change as soon as the delta is applied (pre-commit).
+	preHash1 := ledgerTestWrapper.GetTempStateHash()
+	testutil.AssertNotEquals(t, preHash1, hash2)
+
+	ledgerTestWrapper.CommitStateDelta(1)
+
+	hash1 := ledgerTestWrapper.GetTempStateHash()
+	testutil.AssertEquals(t, preHash1, hash1)
+	testutil.AssertNotEquals(t, hash1, hash2)
+
+	// Roll forwards once
+	delta2.RollBackwards = false
+	ledgerTestWrapper.ApplyStateDelta(2, delta2)
+	preHash2 := ledgerTestWrapper.GetTempStateHash()
+	testutil.AssertEquals(t, preHash2, hash2)
+	// Rolling back the uncommitted delta must restore the block-1 hash.
+	ledgerTestWrapper.RollbackStateDelta(2)
+	preHash2 = ledgerTestWrapper.GetTempStateHash()
+	testutil.AssertEquals(t, preHash2, hash1)
+	ledgerTestWrapper.ApplyStateDelta(3, delta2)
+	preHash2 = ledgerTestWrapper.GetTempStateHash()
+	testutil.AssertEquals(t, preHash2, hash2)
+	ledgerTestWrapper.CommitStateDelta(3)
+	preHash2 = ledgerTestWrapper.GetTempStateHash()
+	testutil.AssertEquals(t, preHash2, hash2)
+
+}
+
+// TestPreviewTXBatchBlock verifies that the block info computed by
+// GetTXBatchPreviewBlockInfo for an in-progress tx-batch is identical to
+// the blockchain info reported after the very same batch is committed.
+func TestPreviewTXBatchBlock(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+
+	// Block 0
+	ledger.BeginTxBatch(0)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1A"))
+	ledger.SetState("chaincode2", "key2", []byte("value2A"))
+	ledger.SetState("chaincode3", "key3", []byte("value3A"))
+	ledger.TxFinished("txUuid1", true)
+	transaction, _ := buildTestTx(t)
+
+	// Preview before committing; the preview must not mutate ledger state.
+	previewBlockInfo, err := ledger.GetTXBatchPreviewBlockInfo(0, []*protos.Transaction{transaction}, []byte("proof"))
+	testutil.AssertNoError(t, err, "Error fetching preview block info.")
+
+	ledger.CommitTxBatch(0, []*protos.Transaction{transaction}, nil, []byte("proof"))
+	committedBlockInfo, err := ledger.GetBlockchainInfo()
+	testutil.AssertNoError(t, err, "Error fetching committed block hash.")
+
+	// Preview and post-commit info must agree exactly.
+	testutil.AssertEquals(t, previewBlockInfo, committedBlockInfo)
+}
+
+// TestGetTransactionByUUID verifies that a committed transaction can be
+// fetched back by its UUID, and that an unknown UUID yields
+// ErrResourceNotFound with a nil transaction.
+func TestGetTransactionByUUID(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+
+	// Block 0
+	ledger.BeginTxBatch(0)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincode1", "key1", []byte("value1A"))
+	ledger.SetState("chaincode2", "key2", []byte("value2A"))
+	ledger.SetState("chaincode3", "key3", []byte("value3A"))
+	ledger.TxFinished("txUuid1", true)
+	transaction, uuid := buildTestTx(t)
+	ledger.CommitTxBatch(0, []*protos.Transaction{transaction}, nil, []byte("proof"))
+
+	// Lookup by the UUID of the committed transaction must round-trip.
+	ledgerTransaction, err := ledger.GetTransactionByUUID(uuid)
+	testutil.AssertNoError(t, err, "Error fetching transaction by UUID.")
+	testutil.AssertEquals(t, transaction, ledgerTransaction)
+
+	// Lookup of a non-existent UUID must fail with ErrResourceNotFound.
+	ledgerTransaction, err = ledger.GetTransactionByUUID("InvalidUUID")
+	testutil.AssertEquals(t, err, ErrResourceNotFound)
+	testutil.AssertNil(t, ledgerTransaction)
+}
+
+// TestRangeScanIterator exercises GetStateRangeScanIterator over: an empty
+// ledger, committed state only (committed=true), and committed state merged
+// with in-flight tx-batch changes (committed=false) — including new keys,
+// modified keys, deleted keys, and empty start/end keys (open-ended ranges).
+func TestRangeScanIterator(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	ledger := ledgerTestWrapper.ledger
+
+	///////// Test with an empty Ledger //////////
+	//////////////////////////////////////////////
+	itr, _ := ledger.GetStateRangeScanIterator("chaincodeID2", "key2", "key5", false)
+	statemgmt.AssertIteratorContains(t, itr, map[string][]byte{})
+	itr.Close()
+
+	itr, _ = ledger.GetStateRangeScanIterator("chaincodeID2", "key2", "key5", true)
+	statemgmt.AssertIteratorContains(t, itr, map[string][]byte{})
+	itr.Close()
+
+	// Commit initial data to ledger
+	ledger.BeginTxBatch(0)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincodeID1", "key1", []byte("value1"))
+
+	ledger.SetState("chaincodeID2", "key1", []byte("value1"))
+	ledger.SetState("chaincodeID2", "key2", []byte("value2"))
+	ledger.SetState("chaincodeID2", "key3", []byte("value3"))
+
+	ledger.SetState("chaincodeID3", "key1", []byte("value1"))
+
+	// chaincodeID4 gets seven keys so range boundaries fall strictly inside.
+	ledger.SetState("chaincodeID4", "key1", []byte("value1"))
+	ledger.SetState("chaincodeID4", "key2", []byte("value2"))
+	ledger.SetState("chaincodeID4", "key3", []byte("value3"))
+	ledger.SetState("chaincodeID4", "key4", []byte("value4"))
+	ledger.SetState("chaincodeID4", "key5", []byte("value5"))
+	ledger.SetState("chaincodeID4", "key6", []byte("value6"))
+	ledger.SetState("chaincodeID4", "key7", []byte("value7"))
+
+	ledger.SetState("chaincodeID5", "key1", []byte("value5"))
+	ledger.SetState("chaincodeID6", "key1", []byte("value6"))
+
+	ledger.TxFinished("txUuid1", true)
+	transaction, _ := buildTestTx(t)
+	ledger.CommitTxBatch(0, []*protos.Transaction{transaction}, nil, []byte("proof"))
+
+	// Add new keys and modify existing keys in on-going tx-batch
+	// (batch 1 is deliberately left uncommitted for the committed=false scans).
+	ledger.BeginTxBatch(1)
+	ledger.TxBegin("txUuid1")
+	ledger.SetState("chaincodeID4", "key2", []byte("value2_new"))
+	ledger.DeleteState("chaincodeID4", "key3")
+	ledger.SetState("chaincodeID4", "key8", []byte("value8_new"))
+
+	///////////////////// Test with committed=true ///////////
+	//////////////////////////////////////////////////////////
+	// test range scan for chaincodeID4; uncommitted batch-1 changes are invisible.
+	itr, _ = ledger.GetStateRangeScanIterator("chaincodeID4", "key2", "key5", true)
+	statemgmt.AssertIteratorContains(t, itr,
+		map[string][]byte{
+			"key2": []byte("value2"),
+			"key3": []byte("value3"),
+			"key4": []byte("value4"),
+			"key5": []byte("value5"),
+		})
+	itr.Close()
+
+	// test with empty start-key (range open at the low end)
+	itr, _ = ledger.GetStateRangeScanIterator("chaincodeID4", "", "key5", true)
+	statemgmt.AssertIteratorContains(t, itr,
+		map[string][]byte{
+			"key1": []byte("value1"),
+			"key2": []byte("value2"),
+			"key3": []byte("value3"),
+			"key4": []byte("value4"),
+			"key5": []byte("value5"),
+		})
+	itr.Close()
+
+	// test with empty end-key (full scan of the chaincode's keys)
+	itr, _ = ledger.GetStateRangeScanIterator("chaincodeID4", "", "", true)
+	statemgmt.AssertIteratorContains(t, itr,
+		map[string][]byte{
+			"key1": []byte("value1"),
+			"key2": []byte("value2"),
+			"key3": []byte("value3"),
+			"key4": []byte("value4"),
+			"key5": []byte("value5"),
+			"key6": []byte("value6"),
+			"key7": []byte("value7"),
+		})
+	itr.Close()
+
+	///////////////////// Test with committed=false ///////////
+	//////////////////////////////////////////////////////////
+	// test range scan for chaincodeID4: key2 shows the in-flight value,
+	// key3 is absent (deleted in batch 1).
+	itr, _ = ledger.GetStateRangeScanIterator("chaincodeID4", "key2", "key5", false)
+	statemgmt.AssertIteratorContains(t, itr,
+		map[string][]byte{
+			"key2": []byte("value2_new"),
+			"key4": []byte("value4"),
+			"key5": []byte("value5"),
+		})
+	itr.Close()
+
+	// test with empty start-key
+	itr, _ = ledger.GetStateRangeScanIterator("chaincodeID4", "", "key5", false)
+	statemgmt.AssertIteratorContains(t, itr,
+		map[string][]byte{
+			"key1": []byte("value1"),
+			"key2": []byte("value2_new"),
+			"key4": []byte("value4"),
+			"key5": []byte("value5"),
+		})
+	itr.Close()
+
+	// test with empty end-key; key8 (added only in batch 1) is now visible.
+	itr, _ = ledger.GetStateRangeScanIterator("chaincodeID4", "", "", false)
+	statemgmt.AssertIteratorContains(t, itr,
+		map[string][]byte{
+			"key1": []byte("value1"),
+			"key2": []byte("value2_new"),
+			"key4": []byte("value4"),
+			"key5": []byte("value5"),
+			"key6": []byte("value6"),
+			"key7": []byte("value7"),
+			"key8": []byte("value8_new"),
+		})
+	itr.Close()
+}
+
+// TestGetSetMultipleKeys verifies that SetStateMultipleKeys followed by a
+// commit makes all keys readable via GetStateMultipleKeys, with values
+// returned in the same order as the requested keys.
+func TestGetSetMultipleKeys(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	l := ledgerTestWrapper.ledger
+	l.BeginTxBatch(1)
+	l.TxBegin("txUUID")
+	l.SetStateMultipleKeys("chaincodeID", map[string][]byte{"key1": []byte("value1"), "key2": []byte("value2")})
+	l.TxFinished("txUUID", true)
+	tx, _ := buildTestTx(t)
+	l.CommitTxBatch(1, []*protos.Transaction{tx}, nil, nil)
+
+	// committed=true: read from persisted state only.
+	values, _ := l.GetStateMultipleKeys("chaincodeID", []string{"key1", "key2"}, true)
+	testutil.AssertEquals(t, values, [][]byte{[]byte("value1"), []byte("value2")})
+}
+
+// TestCopyState verifies that CopyState duplicates every key/value of a
+// source chaincode's state into a destination chaincode, including keys
+// written both via SetStateMultipleKeys and via single SetState calls.
+func TestCopyState(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	l := ledgerTestWrapper.ledger
+	// Batch 1: populate chaincodeID1 with three keys.
+	l.BeginTxBatch(1)
+	l.TxBegin("txUUID")
+	l.SetStateMultipleKeys("chaincodeID1", map[string][]byte{"key1": []byte("value1"), "key2": []byte("value2")})
+	l.SetState("chaincodeID1", "key3", []byte("value3"))
+	l.TxFinished("txUUID", true)
+	tx, _ := buildTestTx(t)
+	l.CommitTxBatch(1, []*protos.Transaction{tx}, nil, nil)
+
+	// Batch 2: copy all of chaincodeID1's state into chaincodeID2.
+	l.BeginTxBatch(2)
+	l.TxBegin("txUUID")
+	l.CopyState("chaincodeID1", "chaincodeID2")
+	l.TxFinished("txUUID", true)
+	tx, _ = buildTestTx(t)
+	l.CommitTxBatch(2, []*protos.Transaction{tx}, nil, nil)
+
+	// The destination must hold the same three values after the copy.
+	values, _ := l.GetStateMultipleKeys("chaincodeID2", []string{"key1", "key2", "key3"}, true)
+	testutil.AssertEquals(t, values, [][]byte{[]byte("value1"), []byte("value2"), []byte("value3")})
+}
+
+// TestLedgerEmptyArrayValue verifies that the ledger distinguishes an empty
+// byte-slice value (a key explicitly set to []byte{}) from a missing key
+// (which reads back as nil).
+func TestLedgerEmptyArrayValue(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	l := ledgerTestWrapper.ledger
+	l.BeginTxBatch(1)
+	l.TxBegin("txUUID")
+	l.SetState("chaincodeID1", "key1", []byte{})
+	l.TxFinished("txUUID", true)
+	tx, _ := buildTestTx(t)
+	l.CommitTxBatch(1, []*protos.Transaction{tx}, nil, nil)
+
+	// A key set to an empty slice must read back non-nil with length 0.
+	value, _ := l.GetState("chaincodeID1", "key1", true)
+	if value == nil || len(value) != 0 {
+		t.Fatalf("An empty array expected in value. Found = %#v", value)
+	}
+
+	// A key never written must read back as nil (not an empty slice).
+	value, _ = l.GetState("chaincodeID1", "non-existing-key", true)
+	if value != nil {
+		t.Fatalf("A nil value expected. Found = %#v", value)
+	}
+}
+
+// TestLedgerInvalidInput verifies that SetState rejects a nil value and an
+// empty key with a ledger *Error of type ErrorTypeInvalidArgument, and that
+// a subsequent valid write in the same tx still commits normally.
+func TestLedgerInvalidInput(t *testing.T) {
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(t)
+	l := ledgerTestWrapper.ledger
+	l.BeginTxBatch(1)
+	l.TxBegin("txUUID")
+
+	// nil value input
+	err := l.SetState("chaincodeID1", "key1", nil)
+	ledgerErr, ok := err.(*Error)
+	if !(ok && ledgerErr.Type() == ErrorTypeInvalidArgument) {
+		t.Fatal("A 'LedgerError' of type 'ErrorTypeInvalidArgument' should have been thrown")
+	} else {
+		t.Logf("An expected error [%s] is received", err)
+	}
+
+	// empty string key
+	err = l.SetState("chaincodeID1", "", []byte("value1"))
+	ledgerErr, ok = err.(*Error)
+	if !(ok && ledgerErr.Type() == ErrorTypeInvalidArgument) {
+		t.Fatal("A 'LedgerError' of type 'ErrorTypeInvalidArgument' should have been thrown")
+	}
+
+	// A valid write after the rejected ones must still succeed and commit.
+	l.SetState("chaincodeID1", "key1", []byte("value1"))
+	l.TxFinished("txUUID", true)
+	tx, _ := buildTestTx(t)
+	l.CommitTxBatch(1, []*protos.Transaction{tx}, nil, nil)
+	value, _ := l.GetState("chaincodeID1", "key1", true)
+	testutil.AssertEquals(t, value, []byte("value1"))
+}
diff --git a/core/ledger/ledger_test_exports.go b/core/ledger/ledger_test_exports.go
new file mode 100644
index 00000000000..f1d7d013b04
--- /dev/null
+++ b/core/ledger/ledger_test_exports.go
@@ -0,0 +1,37 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ledger
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+// Shared DB wrapper used by tests in this package to clean/inspect the test DB.
+var testDBWrapper = db.NewTestDBWrapper()
+
+// InitTestLedger provides a ledger for testing. This method creates a fresh
+// db and constructs a new ledger instance on top of it, replacing the
+// package-level ledger with the new instance.
+func InitTestLedger(t *testing.T) *Ledger {
+	testDBWrapper.CleanDB(t)
+	// NOTE(review): GetLedger's result is discarded — presumably called only
+	// to initialize package-level state before replacing it; confirm intent.
+	_, err := GetLedger()
+	testutil.AssertNoError(t, err, "Error while constructing ledger")
+	newLedger, err := GetNewLedger()
+	testutil.AssertNoError(t, err, "Error while constructing ledger")
+	// Point the package-level singleton at the freshly constructed ledger.
+	ledger = newLedger
+	return newLedger
+}
diff --git a/core/ledger/perf_test.go b/core/ledger/perf_test.go
new file mode 100644
index 00000000000..60d87d2c7b1
--- /dev/null
+++ b/core/ledger/perf_test.go
@@ -0,0 +1,233 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ledger
+
+import (
+ "flag"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/perfstat"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+ "github.com/hyperledger/fabric/core/util"
+ "github.com/hyperledger/fabric/protos"
+ "github.com/op/go-logging"
+ "github.com/tecbot/gorocksdb"
+)
+
+// BenchmarkDB measures raw DB read throughput for randomly chosen keys.
+// Behavior is controlled via testParams flags: KVSize, MaxKeySuffix,
+// KeyPrefix, and PopulateDB. With -PopulateDB=true it only populates the DB
+// (via populateDB) and returns without benchmarking reads.
+func BenchmarkDB(b *testing.B) {
+	b.Logf("testParams:%q", testParams)
+	flags := flag.NewFlagSet("testParams", flag.ExitOnError)
+	kvSize := flags.Int("KVSize", 1000, "size of the key-value")
+	toPopulateDB := flags.Bool("PopulateDB", false, "Run in populate DB mode")
+	maxKeySuffix := flags.Int("MaxKeySuffix", 1, "the keys are appended with _1, _2,.. upto MaxKeySuffix")
+	keyPrefix := flags.String("KeyPrefix", "Key_", "The generated workload will have keys such as KeyPrefix_1, KeyPrefix_2, and so on")
+	flags.Parse(testParams)
+	if *toPopulateDB {
+		b.ResetTimer()
+		populateDB(b, *kvSize, *maxKeySuffix, *keyPrefix)
+		return
+	}
+
+	dbWrapper := db.NewTestDBWrapper()
+	randNumGen := testutil.NewTestRandomNumberGenerator(*maxKeySuffix)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		// Read a random existing key; SetBytes lets the framework report MB/s.
+		key := []byte(*keyPrefix + strconv.Itoa(randNumGen.Next()))
+		value := dbWrapper.GetFromDB(b, key)
+		b.SetBytes(int64(len(value)))
+	}
+}
+
+// BenchmarkLedgerSingleKeyTransaction measures end-to-end ledger throughput
+// for batches of transactions that repeatedly read and overwrite a single
+// key. Tunable via testParams flags: Key, KVSize, BatchSize, NumBatches,
+// NumWritesToLedger.
+func BenchmarkLedgerSingleKeyTransaction(b *testing.B) {
+	b.Logf("testParams:%q", testParams)
+	flags := flag.NewFlagSet("testParams", flag.ExitOnError)
+	key := flags.String("Key", "key", "key name")
+	kvSize := flags.Int("KVSize", 1000, "size of the key-value")
+	batchSize := flags.Int("BatchSize", 100, "size of the key-value")
+	numBatches := flags.Int("NumBatches", 100, "number of batches")
+	numWritesToLedger := flags.Int("NumWritesToLedger", 4, "size of the key-value")
+	flags.Parse(testParams)
+
+	b.Logf(`Running test with params: key=%s, kvSize=%d, batchSize=%d, numBatches=%d, NumWritesToLedger=%d`,
+		*key, *kvSize, *batchSize, *numBatches, *numWritesToLedger)
+
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(b)
+	ledger := ledgerTestWrapper.ledger
+
+	chaincode := "chaincodeId"
+	// Size the value so key + chaincode + value together total kvSize bytes.
+	value := testutil.ConstructRandomBytes(b, *kvSize-(len(chaincode)+len(*key)))
+	tx := constructDummyTx(b)
+	serializedBytes, _ := tx.Bytes()
+	b.Logf("Size of serialized bytes for tx = %d", len(serializedBytes))
+	b.ResetTimer()
+	for n := 0; n < b.N; n++ {
+		for i := 0; i < *numBatches; i++ {
+			ledger.BeginTxBatch(1)
+			// execute one batch
+			var transactions []*protos.Transaction
+			for j := 0; j < *batchSize; j++ {
+				ledger.TxBegin("txUuid")
+				_, err := ledger.GetState(chaincode, *key, true)
+				if err != nil {
+					b.Fatalf("Error in getting state: %s", err)
+				}
+				for l := 0; l < *numWritesToLedger; l++ {
+					ledger.SetState(chaincode, *key, value)
+				}
+				ledger.TxFinished("txUuid", true)
+				transactions = append(transactions, tx)
+			}
+			ledger.CommitTxBatch(1, transactions, nil, []byte("proof"))
+		}
+	}
+	b.StopTimer()
+
+	//verify value persisted
+	value, _ = ledger.GetState(chaincode, *key, true)
+	size := ledger.GetBlockchainSize()
+	b.Logf("Value size=%d, Blockchain height=%d", len(value), size)
+}
+
+// BenchmarkLedgerPopulate measures the cost of filling a fresh ledger with
+// MaxKeySuffix sequentially-numbered keys, committed in batches of
+// BatchSize transactions. Tunable via testParams flags: KVSize,
+// MaxKeySuffix, KeyPrefix, BatchSize.
+func BenchmarkLedgerPopulate(b *testing.B) {
+	b.Logf("testParams:%q", testParams)
+	disableLogging()
+	flags := flag.NewFlagSet("testParams", flag.ExitOnError)
+	kvSize := flags.Int("KVSize", 1000, "size of the key-value")
+	maxKeySuffix := flags.Int("MaxKeySuffix", 1, "the keys are appended with _1, _2,.. upto MaxKeySuffix")
+	keyPrefix := flags.String("KeyPrefix", "Key_", "The generated workload will have keys such as KeyPrefix_1, KeyPrefix_2, and so on")
+	batchSize := flags.Int("BatchSize", 100, "size of the key-value")
+	flags.Parse(testParams)
+
+	b.Logf(`Running test with params: keyPrefix=%s, kvSize=%d, batchSize=%d, maxKeySuffix=%d`,
+		*keyPrefix, *kvSize, *batchSize, *maxKeySuffix)
+
+	ledgerTestWrapper := createFreshDBAndTestLedgerWrapper(b)
+	ledger := ledgerTestWrapper.ledger
+
+	chaincode := "chaincodeId"
+	// Integer division: any remainder keys beyond a full batch are not written.
+	numBatches := *maxKeySuffix / *batchSize
+	tx := constructDummyTx(b)
+	value := testutil.ConstructRandomBytes(b, *kvSize-(len(chaincode)+len(*keyPrefix)))
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		for batchID := 0; batchID < numBatches; batchID++ {
+			ledger.BeginTxBatch(1)
+			// execute one batch
+			var transactions []*protos.Transaction
+			for j := 0; j < *batchSize; j++ {
+				ledger.TxBegin("txUuid")
+				keyNumber := batchID*(*batchSize) + j
+				key := *keyPrefix + strconv.Itoa(keyNumber)
+				ledger.SetState(chaincode, key, value)
+				ledger.TxFinished("txUuid", true)
+				transactions = append(transactions, tx)
+			}
+			ledger.CommitTxBatch(1, transactions, nil, []byte("proof"))
+		}
+	}
+	b.StopTimer()
+	b.Logf("DB stats afters populating: %s", testDBWrapper.GetEstimatedNumKeys(b))
+}
+
+// BenchmarkLedgerRandomTransactions measures ledger throughput for batches
+// of transactions that each perform a configurable number of random-key
+// reads and writes. Tunable via testParams flags: KeyPrefix, KVSize,
+// MaxKeySuffix, BatchSize, NumBatches, NumReadsFromLedger,
+// NumWritesToLedger. Unlike the other ledger benchmarks, this one builds
+// its ledger with GetNewLedger directly (no fresh-DB wrapper).
+func BenchmarkLedgerRandomTransactions(b *testing.B) {
+	disableLogging()
+	b.Logf("testParams:%q", testParams)
+	flags := flag.NewFlagSet("testParams", flag.ExitOnError)
+	keyPrefix := flags.String("KeyPrefix", "Key_", "The generated workload will have keys such as KeyPrefix_1, KeyPrefix_2, and so on")
+	kvSize := flags.Int("KVSize", 1000, "size of the key-value")
+	maxKeySuffix := flags.Int("MaxKeySuffix", 1, "the keys are appended with _1, _2,.. upto MaxKeySuffix")
+	batchSize := flags.Int("BatchSize", 100, "size of the key-value")
+	numBatches := flags.Int("NumBatches", 100, "number of batches")
+	numReadsFromLedger := flags.Int("NumReadsFromLedger", 4, "Number of Key-Values to read")
+	numWritesToLedger := flags.Int("NumWritesToLedger", 4, "Number of Key-Values to write")
+	flags.Parse(testParams)
+
+	b.Logf(`Running test with params: keyPrefix=%s, kvSize=%d, batchSize=%d, maxKeySuffix=%d, numBatches=%d, numReadsFromLedger=%d, numWritesToLedger=%d`,
+		*keyPrefix, *kvSize, *batchSize, *maxKeySuffix, *numBatches, *numReadsFromLedger, *numWritesToLedger)
+
+	ledger, err := GetNewLedger()
+	testutil.AssertNoError(b, err, "Error while constructing ledger")
+
+	chaincode := "chaincodeId"
+	tx := constructDummyTx(b)
+	value := testutil.ConstructRandomBytes(b, *kvSize-(len(chaincode)+len(*keyPrefix)))
+
+	b.ResetTimer()
+	startTime := time.Now()
+	for i := 0; i < b.N; i++ {
+		for batchID := 0; batchID < *numBatches; batchID++ {
+			ledger.BeginTxBatch(1)
+			// execute one batch
+			var transactions []*protos.Transaction
+			for j := 0; j < *batchSize; j++ {
+				// NOTE(review): the random generator is re-constructed for every
+				// transaction — confirm whether per-tx reseeding is intended.
+				randomKeySuffixGen := testutil.NewTestRandomNumberGenerator(*maxKeySuffix)
+				ledger.TxBegin("txUuid")
+				for k := 0; k < *numReadsFromLedger; k++ {
+					randomKey := *keyPrefix + strconv.Itoa(randomKeySuffixGen.Next())
+					ledger.GetState(chaincode, randomKey, true)
+				}
+				for k := 0; k < *numWritesToLedger; k++ {
+					randomKey := *keyPrefix + strconv.Itoa(randomKeySuffixGen.Next())
+					ledger.SetState(chaincode, randomKey, value)
+				}
+				ledger.TxFinished("txUuid", true)
+				transactions = append(transactions, tx)
+			}
+			ledger.CommitTxBatch(1, transactions, nil, []byte("proof"))
+		}
+	}
+	b.StopTimer()
+	perfstat.UpdateTimeStat("timeSpent", startTime)
+	perfstat.PrintStats()
+	b.Logf("DB stats afters populating: %s", testDBWrapper.GetEstimatedNumKeys(b))
+}
+
+// populateDB fills a clean test DB with totalKeys entries named
+// keyPrefix0..keyPrefix(totalKeys-1), each sized so key+value totals kvSize
+// bytes, flushing in write-batches of 1000 keys, then closes the DB.
+func populateDB(tb testing.TB, kvSize int, totalKeys int, keyPrefix string) {
+	dbWrapper := db.NewTestDBWrapper()
+	dbWrapper.CleanDB(tb)
+	batch := gorocksdb.NewWriteBatch()
+	for i := 0; i < totalKeys; i++ {
+		key := []byte(keyPrefix + strconv.Itoa(i))
+		value := testutil.ConstructRandomBytes(tb, kvSize-len(key))
+		batch.Put(key, value)
+		// Flush every 1000 keys to bound the batch's memory footprint.
+		// (The original flushed on i%1000==0, which wrote an empty batch at
+		// i=0 and, worse, silently dropped the final partial batch.)
+		if (i+1)%1000 == 0 {
+			dbWrapper.WriteToDB(tb, batch)
+			batch = gorocksdb.NewWriteBatch()
+		}
+	}
+	// Write any keys remaining in the final partial batch.
+	if batch.Count() > 0 {
+		dbWrapper.WriteToDB(tb, batch)
+	}
+	dbWrapper.CloseDB(tb)
+}
+
+// constructDummyTx builds a minimal transaction with a fresh UUID and fixed
+// dummy chaincode/function/args, failing the test if construction errors.
+func constructDummyTx(tb testing.TB) *protos.Transaction {
+	uuid := util.GenerateUUID()
+	tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "dummyChaincodeId"}, uuid, "dummyFunction", []string{"dummyParamValue1, dummyParamValue2"})
+	testutil.AssertNil(tb, err)
+	return tx
+}
+
+// disableLogging raises the log thresholds of the ledger-related modules so
+// benchmark runs are not dominated by log output. Note that "state",
+// "buckettree", and "db" are set to INFO, not ERROR.
+func disableLogging() {
+	testutil.SetLogLevel(logging.ERROR, "indexes")
+	testutil.SetLogLevel(logging.ERROR, "ledger")
+	testutil.SetLogLevel(logging.INFO, "state")
+	testutil.SetLogLevel(logging.ERROR, "statemgmt")
+	testutil.SetLogLevel(logging.INFO, "buckettree")
+	testutil.SetLogLevel(logging.INFO, "db")
+}
diff --git a/core/ledger/perfstat/stat.go b/core/ledger/perfstat/stat.go
new file mode 100644
index 00000000000..1b8382d3a5d
--- /dev/null
+++ b/core/ledger/perfstat/stat.go
@@ -0,0 +1,70 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package perfstat
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+// stat accumulates min/max/total/count for one named measurement series.
+// All fields are guarded by rwLock; readers use RLock, updaters use Lock.
+type stat struct {
+	rwLock   sync.RWMutex
+	statName string // unique name of the stat (typically call-path + id)
+	desc     string // human-readable description (e.g. "file:line")
+
+	numInvocations int64 // number of samples recorded
+	total          int64 // sum of all sample values
+	min            int64 // smallest sample seen (starts at MaxInt64)
+	max            int64 // largest sample seen (starts at 0)
+}
+
+// newStat creates a stat with min primed to MaxInt64 (expressed as
+// ^uint64(0)>>1) so the first recorded sample always becomes the minimum.
+func newStat(name string, desc string) *stat {
+	return &stat{statName: name, desc: desc, min: int64(^uint64(0) >> 1)}
+}
+
+// String renders the stat under a read lock. The average is computed
+// defensively: with zero invocations (freshly created or just reset) the
+// original code divided by zero and panicked; here the average reports 0.
+func (s *stat) String() string {
+	s.rwLock.RLock()
+	defer s.rwLock.RUnlock()
+	avg := int64(0)
+	if s.numInvocations > 0 {
+		avg = s.total / s.numInvocations
+	}
+	return fmt.Sprintf("%s: [total:%d, numInvocation:%d, average:%d, min=%d, max=%d]",
+		s.statName, s.total, s.numInvocations, avg, s.min, s.max)
+}
+
+// reset clears all counters back to their initial state (min re-primed to
+// MaxInt64, max/total/count to 0) under the write lock.
+func (s *stat) reset() {
+	s.rwLock.Lock()
+	defer s.rwLock.Unlock()
+	s.numInvocations = 0
+	s.min = int64(^uint64(0) >> 1)
+	s.max = 0
+	s.total = 0
+}
+
+// updateTimeSpent records the nanoseconds elapsed since startTime as one sample.
+func (s *stat) updateTimeSpent(startTime time.Time) {
+	s.updateDataStat(time.Since(startTime).Nanoseconds())
+}
+
+// updateDataStat records one sample under the write lock, updating count,
+// total, min, and max. The min and max checks are deliberately independent:
+// the original used `else if`, so a sample that was both the new minimum
+// and the new maximum (e.g. the very first sample, since min starts at
+// MaxInt64) never updated max, leaving max stuck at 0.
+func (s *stat) updateDataStat(value int64) {
+	s.rwLock.Lock()
+	defer s.rwLock.Unlock()
+	s.numInvocations++
+	s.total += value
+	if value < s.min {
+		s.min = value
+	}
+	if value > s.max {
+		s.max = value
+	}
+}
diff --git a/core/ledger/perfstat/stat_holder.go b/core/ledger/perfstat/stat_holder.go
new file mode 100644
index 00000000000..ac601cafc40
--- /dev/null
+++ b/core/ledger/perfstat/stat_holder.go
@@ -0,0 +1,162 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package perfstat
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/op/go-logging"
+)
+
+// Compile-time switches for the perfstat package: stats collection is off
+// by default and must be enabled by flipping enableStats.
+const enableStats = false
+const printPeriodically = true
+const printInterval = 10000 //Millisecond
+// commonPrefix is stripped from function names when building call paths.
+const commonPrefix = "github.com/hyperledger/fabric/core/ledger"
+const commonPrefixLen = len(commonPrefix)
+
+var holder *statsHolder
+var once sync.Once
+var logger = logging.MustGetLogger("ledger.perfstat")
+
+// statsHolder is the process-wide registry of stats, keyed by stat name and
+// guarded by rwLock for concurrent readers/updaters.
+type statsHolder struct {
+	rwLock sync.RWMutex
+	m      map[string]*stat
+}
+
+// init lazily wires up the registry only when stats are compiled in, and
+// optionally starts the background goroutine that logs stats periodically.
+func init() {
+	if !enableStats {
+		return
+	}
+	holder = &statsHolder{m: make(map[string]*stat)}
+	if printPeriodically {
+		go printStatsPeriodically()
+	}
+}
+
+// UpdateTimeStat updates the stats for time spent at a particular point in
+// the code. The stat name combines the caller's call path with id, so the
+// same id used at different call sites yields distinct stats. No-op unless
+// enableStats is true.
+func UpdateTimeStat(id string, startTime time.Time) {
+	if !enableStats {
+		return
+	}
+	path := getCallerInfo()
+	statName := fmt.Sprintf("%s:%s", path, id)
+	stat := getOrCreateStat(statName, "", 0)
+	stat.updateDataStat(time.Since(startTime).Nanoseconds())
+}
+
+// UpdateDataStat updates the stats for a data value observed at a particular
+// point in the code, keyed by the caller's call path plus id. No-op unless
+// enableStats is true.
+func UpdateDataStat(id string, value int64) {
+	if !enableStats {
+		return
+	}
+	path := getCallerInfo()
+	statName := fmt.Sprintf("%s:%s", path, id)
+	stat := getOrCreateStat(statName, "", 0)
+	stat.updateDataStat(value)
+}
+
+// ResetStats resets all the stats data. Existing stat entries are kept in
+// the registry; only their counters are zeroed. No-op unless enableStats.
+func ResetStats() {
+	if !enableStats {
+		return
+	}
+	holder.rwLock.Lock()
+	defer holder.rwLock.Unlock()
+	for _, v := range holder.m {
+		v.reset()
+	}
+}
+
+// getOrCreateStat returns the stat registered under name, creating it if
+// absent. It first checks under a read lock (fast path), then upgrades to
+// the write lock and re-checks, since another goroutine may have created
+// the entry in the window between RUnlock and Lock.
+func getOrCreateStat(name string, file string, line int) *stat {
+	holder.rwLock.RLock()
+	stat, ok := holder.m[name]
+	if ok {
+		holder.rwLock.RUnlock()
+		return stat
+	}
+
+	holder.rwLock.RUnlock()
+	holder.rwLock.Lock()
+	defer holder.rwLock.Unlock()
+	// Re-check: the entry may have appeared while we held no lock.
+	stat, ok = holder.m[name]
+	if !ok {
+		stat = newStat(name, fmt.Sprintf("%s:%d", file, line))
+		holder.m[name] = stat
+	}
+	return stat
+}
+
+// printStatsPeriodically loops forever, logging all collected stats every
+// printInterval milliseconds. Intended to run as a background goroutine.
+func printStatsPeriodically() {
+	for {
+		PrintStats()
+		// printInterval is an untyped constant, so this multiplication
+		// yields the same duration as the manual nanosecond arithmetic.
+		time.Sleep(printInterval * time.Millisecond)
+	}
+}
+
+// PrintStats prints all collected stats to the log, sorted by stat name for
+// stable output, bracketed by Start/Finish markers. No-op unless
+// enableStats is true. Holds the read lock for the duration of the dump.
+func PrintStats() {
+	if !enableStats {
+		return
+	}
+	holder.rwLock.RLock()
+	defer holder.rwLock.RUnlock()
+	logger.Info("Stats.......Start")
+	var paths []string
+	for k := range holder.m {
+		paths = append(paths, k)
+	}
+	sort.Strings(paths)
+	for _, k := range paths {
+		v := holder.m[k]
+		logger.Info(v.String())
+	}
+	logger.Info("Stats.......Finish")
+}
+
+// getCallerInfo builds a "caller -> callee" path string from the goroutine's
+// stack, walking outward while function names carry commonPrefix, then
+// rendering the matched frames innermost-last with commonPrefix stripped.
+// Frames are capped at 10 and the 3 innermost (this package's) are skipped.
+func getCallerInfo() string {
+	pc := make([]uintptr, 10)
+	// The original ignored Callers' return count and iterated all 10 slots;
+	// zero-valued pcs make FuncForPC return nil, so f.Name() could panic.
+	// Truncate to the number of frames actually captured.
+	n := runtime.Callers(3, pc)
+	pc = pc[:n]
+	var path bytes.Buffer
+	j := 0
+	for i := range pc {
+		f := runtime.FuncForPC(pc[i])
+		funcName := f.Name()
+		if strings.HasPrefix(funcName, commonPrefix) {
+			j = i
+		} else {
+			break
+		}
+	}
+
+	// Emit frames outermost-first, joined by " -> ".
+	for i := j; i >= 0; i-- {
+		f := runtime.FuncForPC(pc[i])
+		funcName := f.Name()
+		funcNameShort := funcName[commonPrefixLen:]
+		path.WriteString(funcNameShort)
+		if i > 0 {
+			path.WriteString(" -> ")
+		}
+	}
+
+	return path.String()
+}
diff --git a/core/ledger/perfstat/stat_test.go b/core/ledger/perfstat/stat_test.go
new file mode 100644
index 00000000000..042e30a8c74
--- /dev/null
+++ b/core/ledger/perfstat/stat_test.go
@@ -0,0 +1,23 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package perfstat
+
+import "testing"
+
+// TestSkipAll is a placeholder so `go test` reports this package as
+// intentionally untested rather than having no tests.
+func TestSkipAll(t *testing.T) {
+	t.Skip(`No tests in this package for now - This package contains code that is used in performance measurement.`)
+}
diff --git a/core/ledger/pkg_test.go b/core/ledger/pkg_test.go
new file mode 100644
index 00000000000..4d5cdcdc8f9
--- /dev/null
+++ b/core/ledger/pkg_test.go
@@ -0,0 +1,221 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ledger
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+ "github.com/hyperledger/fabric/core/util"
+ "github.com/hyperledger/fabric/protos"
+ "github.com/tecbot/gorocksdb"
+ "golang.org/x/net/context"
+)
+
+var testParams []string
+
+// TestMain parses command-line test parameters and loads the test
+// configuration before running the package's tests.
+func TestMain(m *testing.M) {
+	testParams = testutil.ParseTestParams()
+	testutil.SetupTestConfig()
+	os.Exit(m.Run())
+}
+
+// blockchainTestWrapper pairs a blockchain under test with the enclosing
+// *testing.T so helper methods can fail the test on unexpected errors.
+type blockchainTestWrapper struct {
+	t          *testing.T
+	blockchain *blockchain
+}
+
+// newTestBlockchainWrapper obtains a blockchain handle and wraps it,
+// failing the test if the handle cannot be created.
+func newTestBlockchainWrapper(t *testing.T) *blockchainTestWrapper {
+	blockchain, err := newBlockchain()
+	testutil.AssertNoError(t, err, "Error while getting handle to chain")
+	return &blockchainTestWrapper{t, blockchain}
+}
+
+// addNewBlock persists the given block (with its stateHash) through a rocksdb
+// write batch, marks the block persistence as successful, and returns the new
+// block number. Any error fails the test.
+func (testWrapper *blockchainTestWrapper) addNewBlock(block *protos.Block, stateHash []byte) uint64 {
+	writeBatch := gorocksdb.NewWriteBatch()
+	defer writeBatch.Destroy()
+	newBlockNumber, err := testWrapper.blockchain.addPersistenceChangesForNewBlock(context.TODO(), block, stateHash, writeBatch)
+	testutil.AssertNoError(testWrapper.t, err, "Error while adding a new block")
+	testDBWrapper.WriteToDB(testWrapper.t, writeBatch)
+	testWrapper.blockchain.blockPersistenceStatus(true)
+	return newBlockNumber
+}
+
+// fetchBlockchainSizeFromDB reads the blockchain size directly from the DB,
+// failing the test on error.
+func (testWrapper *blockchainTestWrapper) fetchBlockchainSizeFromDB() uint64 {
+	size, err := fetchBlockchainSizeFromDB()
+	testutil.AssertNoError(testWrapper.t, err, "Error while fetching blockchain size from db")
+	return size
+}
+
+// getBlock fetches the block at blockNumber, failing the test on error.
+func (testWrapper *blockchainTestWrapper) getBlock(blockNumber uint64) *protos.Block {
+	block, err := testWrapper.blockchain.getBlock(blockNumber)
+	testutil.AssertNoError(testWrapper.t, err, "Error while getting block from blockchain")
+	return block
+}
+
+// getLastBlock fetches the most recent block, failing the test on error.
+func (testWrapper *blockchainTestWrapper) getLastBlock() *protos.Block {
+	block, err := testWrapper.blockchain.getLastBlock()
+	testutil.AssertNoError(testWrapper.t, err, "Error while getting block from blockchain")
+	return block
+}
+
+// getBlockByHash fetches the block with the given hash, failing the test on error.
+func (testWrapper *blockchainTestWrapper) getBlockByHash(blockHash []byte) *protos.Block {
+	block, err := testWrapper.blockchain.getBlockByHash(blockHash)
+	testutil.AssertNoError(testWrapper.t, err, "Error while getting block by blockhash from blockchain")
+	return block
+}
+
+// getTransaction fetches the txIndex-th transaction of the block at
+// blockNumber, failing the test on error.
+func (testWrapper *blockchainTestWrapper) getTransaction(blockNumber uint64, txIndex uint64) *protos.Transaction {
+	tx, err := testWrapper.blockchain.getTransaction(blockNumber, txIndex)
+	testutil.AssertNoError(testWrapper.t, err, "Error while getting tx from blockchain")
+	return tx
+}
+
+// getTransactionByBlockHash fetches the txIndex-th transaction of the block
+// with the given hash, failing the test on error.
+func (testWrapper *blockchainTestWrapper) getTransactionByBlockHash(blockHash []byte, txIndex uint64) *protos.Transaction {
+	tx, err := testWrapper.blockchain.getTransactionByBlockHash(blockHash, txIndex)
+	testutil.AssertNoError(testWrapper.t, err, "Error while getting tx from blockchain")
+	return tx
+}
+
+// getTransactionByUUID fetches the transaction with the given UUID, failing
+// the test on error.
+func (testWrapper *blockchainTestWrapper) getTransactionByUUID(txUUID string) *protos.Transaction {
+	tx, err := testWrapper.blockchain.getTransactionByUUID(txUUID)
+	testutil.AssertNoError(testWrapper.t, err, "Error while getting tx from blockchain")
+	return tx
+}
+// populateBlockChainWithSampleData adds three blocks to the chain: a genesis
+// block, a block with one chaincode-deploy transaction, and a block with one
+// invoke transaction. It returns the blocks and the (hard-coded) state hashes
+// used for each, in insertion order.
+func (testWrapper *blockchainTestWrapper) populateBlockChainWithSampleData() (blocks []*protos.Block, hashes [][]byte, err error) {
+	var allBlocks []*protos.Block
+	var allHashes [][]byte
+
+	// ------------------------------------------------------------
+	// Add the first (genesis block)
+	block1 := protos.NewBlock(nil, []byte(testutil.GenerateUUID(testWrapper.t)))
+	allBlocks = append(allBlocks, block1)
+	allHashes = append(allHashes, []byte("stateHash1"))
+	testWrapper.addNewBlock(block1, []byte("stateHash1"))
+
+	// -----------------------------------------------------------
+
+	// ------------------------------------------------------------------
+	// Deploy a chaincode
+	transaction2a, err := protos.NewTransaction(protos.ChaincodeID{Path: "Contracts"}, testutil.GenerateUUID(testWrapper.t), "NewContract", []string{"name: MyContract1, code: var x; function setX(json) {x = json.x}}"})
+	if err != nil {
+		return nil, nil, err
+	}
+	// Now we add the transaction to the block 2 and add the block to the chain
+	transactions2a := []*protos.Transaction{transaction2a}
+	block2 := protos.NewBlock(transactions2a, nil)
+
+	allBlocks = append(allBlocks, block2)
+	allHashes = append(allHashes, []byte("stateHash2"))
+	testWrapper.addNewBlock(block2, []byte("stateHash2"))
+	// -----------------------------------------------------------------
+
+	// ------------------------------------------------------------------
+	// Create a transaction
+	transaction3a, err := protos.NewTransaction(protos.ChaincodeID{Path: "MyContract"}, testutil.GenerateUUID(testWrapper.t), "setX", []string{"{x: \"hello\"}"})
+	if err != nil {
+		return nil, nil, err
+	}
+	// Create the third block and add it to the chain
+	transactions3a := []*protos.Transaction{transaction3a}
+	block3 := protos.NewBlock(transactions3a, nil)
+	allBlocks = append(allBlocks, block3)
+	allHashes = append(allHashes, []byte("stateHash3"))
+	testWrapper.addNewBlock(block3, []byte("stateHash3"))
+
+	// -----------------------------------------------------------------
+	return allBlocks, allHashes, nil
+}
+
+// buildTestTx constructs a minimal transaction with a fresh UUID and returns
+// both, failing the test if construction errors.
+func buildTestTx(tb testing.TB) (*protos.Transaction, string) {
+	uuid := util.GenerateUUID()
+	tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+	testutil.AssertNil(tb, err)
+	return tx, uuid
+}
+
+// buildTestBlock constructs a block containing a single test transaction.
+// The error return is always nil; it exists for caller convenience.
+func buildTestBlock(t *testing.T) (*protos.Block, error) {
+	transactions := []*protos.Transaction{}
+	tx, _ := buildTestTx(t)
+	transactions = append(transactions, tx)
+	block := protos.NewBlock(transactions, nil)
+	return block, nil
+}
+
+// ledgerTestWrapper pairs a Ledger with the enclosing testing.TB so helper
+// methods can fail the test on unexpected errors.
+type ledgerTestWrapper struct {
+	ledger *Ledger
+	tb     testing.TB
+}
+
+// createFreshDBAndTestLedgerWrapper wipes the test DB, constructs a new
+// ledger on top of it, and returns the wrapped ledger.
+func createFreshDBAndTestLedgerWrapper(tb testing.TB) *ledgerTestWrapper {
+	testDBWrapper.CleanDB(tb)
+	ledger, err := GetNewLedger()
+	testutil.AssertNoError(tb, err, "Error while constructing ledger")
+	return &ledgerTestWrapper{ledger, tb}
+}
+
+// GetState reads a key's value (committed or uncommitted per the flag) from
+// the ledger, failing the test on error.
+func (ledgerTestWrapper *ledgerTestWrapper) GetState(chaincodeID string, key string, committed bool) []byte {
+	value, err := ledgerTestWrapper.ledger.GetState(chaincodeID, key, committed)
+	testutil.AssertNoError(ledgerTestWrapper.tb, err, "error while getting state from ledger")
+	return value
+}
+
+// GetBlockByNumber fetches the block at blockNumber, failing the test on error.
+func (ledgerTestWrapper *ledgerTestWrapper) GetBlockByNumber(blockNumber uint64) *protos.Block {
+	block, err := ledgerTestWrapper.ledger.GetBlockByNumber(blockNumber)
+	testutil.AssertNoError(ledgerTestWrapper.tb, err, "error while getting block from ledger")
+	return block
+}
+
+// VerifyChain verifies the chain between highBlock and lowBlock, failing the
+// test on error and returning the ledger's verification result.
+func (ledgerTestWrapper *ledgerTestWrapper) VerifyChain(highBlock, lowBlock uint64) uint64 {
+	result, err := ledgerTestWrapper.ledger.VerifyChain(highBlock, lowBlock)
+	testutil.AssertNoError(ledgerTestWrapper.tb, err, "error while verifying chain")
+	return result
+}
+
+// PutRawBlock stores the given block at blockNumber via the wrapped ledger,
+// failing the test on error.
+func (ledgerTestWrapper *ledgerTestWrapper) PutRawBlock(block *protos.Block, blockNumber uint64) {
+	err := ledgerTestWrapper.ledger.PutRawBlock(block, blockNumber)
+	// Fixed copy-pasted assertion message: this helper puts a raw block,
+	// it does not verify the chain.
+	testutil.AssertNoError(ledgerTestWrapper.tb, err, "error while putting a raw block")
+}
+
+// GetStateDelta fetches the state delta recorded for blockNumber, failing the
+// test on error.
+func (ledgerTestWrapper *ledgerTestWrapper) GetStateDelta(blockNumber uint64) *statemgmt.StateDelta {
+	delta, err := ledgerTestWrapper.ledger.GetStateDelta(blockNumber)
+	testutil.AssertNoError(ledgerTestWrapper.tb, err, "error while getting state delta from ledger")
+	return delta
+}
+
+// GetTempStateHash returns the in-progress state hash, failing the test on error.
+func (ledgerTestWrapper *ledgerTestWrapper) GetTempStateHash() []byte {
+	hash, err := ledgerTestWrapper.ledger.GetTempStateHash()
+	testutil.AssertNoError(ledgerTestWrapper.tb, err, "error while getting state hash from ledger")
+	return hash
+}
+
+// ApplyStateDelta applies delta under the given id, failing the test on error.
+func (ledgerTestWrapper *ledgerTestWrapper) ApplyStateDelta(id interface{}, delta *statemgmt.StateDelta) {
+	err := ledgerTestWrapper.ledger.ApplyStateDelta(id, delta)
+	testutil.AssertNoError(ledgerTestWrapper.tb, err, "error applying state delta")
+}
+
+// CommitStateDelta commits the delta previously applied under id, failing the
+// test on error.
+func (ledgerTestWrapper *ledgerTestWrapper) CommitStateDelta(id interface{}) {
+	err := ledgerTestWrapper.ledger.CommitStateDelta(id)
+	testutil.AssertNoError(ledgerTestWrapper.tb, err, "error committing state delta")
+}
+
+// RollbackStateDelta rolls back the delta previously applied under id, failing
+// the test on error.
+func (ledgerTestWrapper *ledgerTestWrapper) RollbackStateDelta(id interface{}) {
+	err := ledgerTestWrapper.ledger.RollbackStateDelta(id)
+	testutil.AssertNoError(ledgerTestWrapper.tb, err, "error rolling back state delta")
+}
diff --git a/core/ledger/statemgmt/buckettree/bucket_cache.go b/core/ledger/statemgmt/buckettree/bucket_cache.go
new file mode 100644
index 00000000000..1501f8b1c3c
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/bucket_cache.go
@@ -0,0 +1,152 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "sync"
+ "time"
+ "unsafe"
+
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/perfstat"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+)
+
+var defaultBucketCacheMaxSize = 100 // MBs
+
+// We can create a cache and keep all the bucket nodes pre-loaded.
+// Since, the bucket nodes do not contain actual data and max possible
+// buckets are pre-determined, the memory demand may not be very high or can easily
+// be controlled - by keeping seletive buckets in the cache (most likely first few levels of the bucket tree - because,
+// higher the level of the bucket, more are the chances that the bucket would be required for recomputation of hash)
+type bucketCache struct {
+ isEnabled bool
+ c map[bucketKey]*bucketNode
+ lock sync.RWMutex
+ size uint64
+ maxSize uint64
+}
+
+// newBucketCache creates a bucketCache whose accounted size is capped at
+// maxSizeMBs megabytes. A non-positive maxSizeMBs disables caching entirely.
+func newBucketCache(maxSizeMBs int) *bucketCache {
+	isEnabled := true
+	if maxSizeMBs <= 0 {
+		isEnabled = false
+	} else {
+		logger.Infof("Constructing bucket-cache with max bucket cache size = [%d] MBs", maxSizeMBs)
+	}
+	return &bucketCache{c: make(map[bucketKey]*bucketNode), maxSize: uint64(maxSizeMBs * 1024 * 1024), isEnabled: isEnabled}
+}
+
+// loadAllBucketNodesFromDB pre-populates the cache with bucket nodes from the
+// state column family. It scans keys prefixed with byte 0 (the bucket-node key
+// prefix) and stops when the accounted cache size would exceed maxSize or the
+// key range ends. No-op when the cache is disabled.
+func (cache *bucketCache) loadAllBucketNodesFromDB() {
+	if !cache.isEnabled {
+		return
+	}
+	openchainDB := db.GetDBHandle()
+	itr := openchainDB.GetStateCFIterator()
+	defer itr.Close()
+	itr.Seek([]byte{byte(0)})
+	count := 0
+	cache.lock.Lock()
+	defer cache.lock.Unlock()
+	for ; itr.Valid(); itr.Next() {
+		key := itr.Key().Data()
+		if key[0] != byte(0) {
+			// Past the bucket-node key range - done.
+			itr.Key().Free()
+			itr.Value().Free()
+			break
+		}
+		bKey := decodeBucketKey(statemgmt.Copy(itr.Key().Data()))
+		nodeBytes := statemgmt.Copy(itr.Value().Data())
+		bucketNode := unmarshalBucketNode(&bKey, nodeBytes)
+		size := bKey.size() + bucketNode.size()
+		cache.size += size
+		if cache.size >= cache.maxSize {
+			cache.size -= size
+			// Free the C-allocated iterator slices before bailing out; the
+			// original code leaked them on this capacity-exceeded path.
+			itr.Key().Free()
+			itr.Value().Free()
+			break
+		}
+		cache.c[bKey] = bucketNode
+		itr.Key().Free()
+		itr.Value().Free()
+		count++
+	}
+	logger.Infof("Loaded buckets data in cache. Total buckets in DB = [%d]. Total cache size:=%d", count, cache.size)
+}
+
+// putWithoutLock inserts or replaces a bucket node in the cache. Callers must
+// hold cache.lock. Transient flags are cleared so the cached copy reflects the
+// persisted view. If the accounted size would exceed maxSize, the entry is
+// evicted (replace path) or simply not added (insert path).
+func (cache *bucketCache) putWithoutLock(key bucketKey, node *bucketNode) {
+	if !cache.isEnabled {
+		return
+	}
+	node.markedForDeletion = false
+	node.childrenUpdated = nil
+	existingNode, ok := cache.c[key]
+	size := uint64(0)
+	if ok {
+		// Account only for the delta between new and old node sizes.
+		// NOTE(review): both sizes are uint64, so if the new node is smaller
+		// this subtraction wraps around - verify intended.
+		size = node.size() - existingNode.size()
+		cache.size += size
+		if cache.size > cache.maxSize {
+			delete(cache.c, key)
+			// NOTE(review): this subtracts key.size()+existingNode.size()
+			// although the running total was just adjusted to reflect the NEW
+			// node, and the insert path below never adds key.size(); the size
+			// bookkeeping looks inconsistent - confirm against loadAllBucketNodesFromDB.
+			cache.size -= (key.size() + existingNode.size())
+		} else {
+			cache.c[key] = node
+		}
+	} else {
+		size = node.size()
+		cache.size += size
+		if cache.size > cache.maxSize {
+			// Over budget: skip caching this node.
+			// NOTE(review): the increment above is not rolled back here - confirm intended.
+			return
+		}
+		cache.c[key] = node
+	}
+}
+
+// get returns the bucket node for key, serving from the in-memory cache when
+// enabled and falling back to the DB on a miss (or when the cache is
+// disabled). The time spent is recorded via perfstat.
+func (cache *bucketCache) get(key bucketKey) (*bucketNode, error) {
+	defer perfstat.UpdateTimeStat("timeSpent", time.Now())
+	if !cache.isEnabled {
+		return fetchBucketNodeFromDB(&key)
+	}
+	cache.lock.RLock()
+	defer cache.lock.RUnlock()
+	bucketNode := cache.c[key]
+	if bucketNode == nil {
+		// Cache miss - read through to the DB (result is not back-filled here).
+		return fetchBucketNodeFromDB(&key)
+	}
+	return bucketNode, nil
+}
+
+// removeWithoutLock deletes the cached node for key and reduces the accounted
+// cache size accordingly. Callers must hold cache.lock.
+func (cache *bucketCache) removeWithoutLock(key bucketKey) {
+	if !cache.isEnabled {
+		return
+	}
+	node, ok := cache.c[key]
+	if ok {
+		cache.size -= (key.size() + node.size())
+		delete(cache.c, key)
+	}
+}
+
+// size returns the in-memory footprint of the bucketKey struct itself.
+func (bk bucketKey) size() uint64 {
+	return uint64(unsafe.Sizeof(bk))
+}
+
+// size estimates the in-memory footprint of a bucket node: the struct plus its
+// child hashes, assuming every child hash has the same length as the first.
+func (bNode *bucketNode) size() uint64 {
+	size := uint64(unsafe.Sizeof(*bNode))
+	numChildHashes := len(bNode.childrenCryptoHash)
+	if numChildHashes > 0 {
+		size += uint64(numChildHashes * len(bNode.childrenCryptoHash[0]))
+	}
+	return size
+}
diff --git a/core/ledger/statemgmt/buckettree/bucket_cache_test.go b/core/ledger/statemgmt/buckettree/bucket_cache_test.go
new file mode 100644
index 00000000000..f2beb3d9459
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/bucket_cache_test.go
@@ -0,0 +1,73 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+ "github.com/op/go-logging"
+)
+
+// TestBucketCache verifies that the sequence of root hashes computed with the
+// bucket cache disabled matches the sequence computed with it enabled.
+func TestBucketCache(t *testing.T) {
+	testutil.SetLogLevel(logging.INFO, "buckettree")
+	rootHash1, rootHash2, rootHash3, rootHash4 := testGetRootHashes(t, false)
+	rootHash5, rootHash6, rootHash7, rootHash8 := testGetRootHashes(t, true)
+	testutil.AssertEquals(t, rootHash1, rootHash5)
+	testutil.AssertEquals(t, rootHash2, rootHash6)
+	testutil.AssertEquals(t, rootHash3, rootHash7)
+	testutil.AssertEquals(t, rootHash4, rootHash8)
+}
+
+// testGetRootHashes applies the same four state mutations against a fresh DB
+// and returns the four resulting root hashes, with the bucket cache enabled or
+// disabled per the flag. When enabled, the cache is (re)built from the DB just
+// before the final mutation so cached reads are actually exercised.
+// NOTE(review): the parameter is named enableBlockCache but it controls the
+// *bucket* cache - consider renaming.
+func testGetRootHashes(t *testing.T, enableBlockCache bool) ([]byte, []byte, []byte, []byte) {
+	// number of buckets at each level 26,9,3,1
+	testHasher, stateImplTestWrapper, stateDelta := createFreshDBAndInitTestStateImplWithCustomHasher(t, 26, 3)
+	// populate hash function such that they intersect at higher level buckets
+	testHasher.populate("chaincodeID1", "key1", 1)
+	testHasher.populate("chaincodeID2", "key2", 15)
+	testHasher.populate("chaincodeID3", "key3", 26)
+
+	if !enableBlockCache {
+		// A zero-size cache is a disabled cache.
+		stateImplTestWrapper.stateImpl.bucketCache = newBucketCache(0)
+	}
+	stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+	stateDelta.Set("chaincodeID2", "key2", []byte("value2"), nil)
+	stateDelta.Set("chaincodeID3", "key3", []byte("value3"), nil)
+	rootHash1 := stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(stateDelta)
+	stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+
+	stateDelta = statemgmt.NewStateDelta()
+	stateDelta.Set("chaincodeID1", "key1", []byte("value1_new"), nil)
+	rootHash2 := stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(stateDelta)
+	stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+
+	stateDelta = statemgmt.NewStateDelta()
+	stateDelta.Delete("chaincodeID2", "key2", nil)
+	rootHash3 := stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(stateDelta)
+	stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+
+	if enableBlockCache {
+		stateImplTestWrapper.stateImpl.bucketCache = newBucketCache(20)
+		stateImplTestWrapper.stateImpl.bucketCache.loadAllBucketNodesFromDB()
+	}
+	stateDelta = statemgmt.NewStateDelta()
+	stateDelta.Set("chaincodeID3", "key3", []byte("value3_new"), nil)
+	rootHash4 := stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(stateDelta)
+	stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+	return rootHash1, rootHash2, rootHash3, rootHash4
+}
diff --git a/core/ledger/statemgmt/buckettree/bucket_hash.go b/core/ledger/statemgmt/buckettree/bucket_hash.go
new file mode 100644
index 00000000000..154d51d68e6
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/bucket_hash.go
@@ -0,0 +1,80 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "github.com/golang/protobuf/proto"
+ openchainUtil "github.com/hyperledger/fabric/core/util"
+)
+
+// bucketHashCalculator accumulates the hashable content of a single bucket.
+// Data nodes must be fed in increasing key order; they are grouped per
+// chaincodeID and serialized into hashingData.
+type bucketHashCalculator struct {
+	bucketKey          *bucketKey
+	currentChaincodeID string
+	dataNodes          []*dataNode
+	hashingData        []byte
+}
+
+// newBucketHashCalculator returns an empty calculator for the given bucket key.
+func newBucketHashCalculator(bucketKey *bucketKey) *bucketHashCalculator {
+	return &bucketHashCalculator{bucketKey, "", nil, nil}
+}
+
+// addNextNode - this method assumes that the datanodes are added in the increasing order of the keys
+// When the chaincodeID changes, the data accumulated for the previous
+// chaincode is flushed into hashingData first.
+func (c *bucketHashCalculator) addNextNode(dataNode *dataNode) {
+	chaincodeID, _ := dataNode.getKeyElements()
+	if chaincodeID != c.currentChaincodeID {
+		c.appendCurrentChaincodeData()
+		c.currentChaincodeID = chaincodeID
+		c.dataNodes = nil
+	}
+	c.dataNodes = append(c.dataNodes, dataNode)
+}
+
+// computeCryptoHash flushes any pending chaincode group and returns the
+// crypto-hash of the accumulated content, or nil if nothing was added.
+func (c *bucketHashCalculator) computeCryptoHash() []byte {
+	if c.currentChaincodeID != "" {
+		c.appendCurrentChaincodeData()
+		c.currentChaincodeID = ""
+		c.dataNodes = nil
+	}
+	logger.Debugf("Hashable content for bucket [%s]: length=%d, contentInStringForm=[%s]", c.bucketKey, len(c.hashingData), string(c.hashingData))
+	if c.hashingData == nil {
+		return nil
+	}
+	return openchainUtil.ComputeCryptoHash(c.hashingData)
+}
+
+// appendCurrentChaincodeData serializes the pending group as:
+// len(chaincodeID) + chaincodeID + numDataNodes + (len(key)+key+len(value)+value)*
+func (c *bucketHashCalculator) appendCurrentChaincodeData() {
+	if c.currentChaincodeID == "" {
+		return
+	}
+	c.appendSizeAndData([]byte(c.currentChaincodeID))
+	c.appendSize(len(c.dataNodes))
+	for _, dataNode := range c.dataNodes {
+		_, key := dataNode.getKeyElements()
+		value := dataNode.getValue()
+		c.appendSizeAndData([]byte(key))
+		c.appendSizeAndData(value)
+	}
+}
+
+// appendSizeAndData appends b's length (as a varint) followed by b itself.
+func (c *bucketHashCalculator) appendSizeAndData(b []byte) {
+	c.appendSize(len(b))
+	c.hashingData = append(c.hashingData, b...)
+}
+
+// appendSize appends size as a protobuf varint.
+func (c *bucketHashCalculator) appendSize(size int) {
+	c.hashingData = append(c.hashingData, proto.EncodeVarint(uint64(size))...)
+}
diff --git a/core/ledger/statemgmt/buckettree/bucket_hash_test.go b/core/ledger/statemgmt/buckettree/bucket_hash_test.go
new file mode 100644
index 00000000000..3bffcd8786f
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/bucket_hash_test.go
@@ -0,0 +1,48 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+// TestBucketHashCalculator feeds data nodes for three chaincodes (in key
+// order) and verifies the resulting hash equals the hash of the expected
+// serialized content.
+func TestBucketHashCalculator(t *testing.T) {
+	initConfig(nil)
+	c := newBucketHashCalculator(newBucketKey(1, 1))
+
+	// With no data nodes added, the hash is nil.
+	testutil.AssertEquals(t, c.computeCryptoHash(), nil)
+
+	c.addNextNode(newDataNode(newDataKey("chaincodeID1", "key1"), []byte("value1")))
+
+	c.addNextNode(newDataNode(newDataKey("chaincodeID_2", "key_1"), []byte("value_1")))
+	c.addNextNode(newDataNode(newDataKey("chaincodeID_2", "key_2"), []byte("value_2")))
+
+	c.addNextNode(newDataNode(newDataKey("chaincodeID3", "key1"), []byte("value1")))
+	c.addNextNode(newDataNode(newDataKey("chaincodeID3", "key2"), []byte("value2")))
+	c.addNextNode(newDataNode(newDataKey("chaincodeID3", "key3"), []byte("value3")))
+
+	hash := c.computeCryptoHash()
+	expectedHashContent := expectedBucketHashContentForTest(
+		[]string{"chaincodeID1", "key1", "value1"},
+		[]string{"chaincodeID_2", "key_1", "value_1", "key_2", "value_2"},
+		[]string{"chaincodeID3", "key1", "value1", "key2", "value2", "key3", "value3"},
+	)
+	t.Logf("Actual HashContent = %#v\n Expected HashContent = %#v", c.hashingData, expectedHashContent)
+	testutil.AssertEquals(t, hash, testutil.ComputeCryptoHash(expectedHashContent))
+}
diff --git a/core/ledger/statemgmt/buckettree/bucket_key.go b/core/ledger/statemgmt/buckettree/bucket_key.go
new file mode 100644
index 00000000000..f87a7b5c659
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/bucket_key.go
@@ -0,0 +1,92 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// bucketKey identifies a bucket in the tree by its level (0 = root) and its
+// 1-based bucket number within that level.
+type bucketKey struct {
+	level        int
+	bucketNumber int
+}
+
+// newBucketKey constructs a bucketKey, panicking if level is outside
+// [0, lowestLevel] or bucketNumber is outside [1, numBuckets(level)].
+func newBucketKey(level int, bucketNumber int) *bucketKey {
+	if level > conf.getLowestLevel() || level < 0 {
+		panic(fmt.Errorf("Invalid Level [%d] for bucket key. Level can be between 0 and [%d]", level, conf.lowestLevel))
+	}
+
+	if bucketNumber < 1 || bucketNumber > conf.getNumBuckets(level) {
+		// Fixed typo in panic message: "nuber" -> "number".
+		panic(fmt.Errorf("Invalid bucket number [%d]. Bucket number at level [%d] can be between 1 and [%d]", bucketNumber, level, conf.getNumBuckets(level)))
+	}
+	return &bucketKey{level, bucketNumber}
+}
+
+// newBucketKeyAtLowestLevel returns the key for bucketNumber at the leaf level.
+func newBucketKeyAtLowestLevel(bucketNumber int) *bucketKey {
+	return newBucketKey(conf.getLowestLevel(), bucketNumber)
+}
+
+// constructRootBucketKey returns the key of the single root bucket (level 0, number 1).
+func constructRootBucketKey() *bucketKey {
+	return newBucketKey(0, 1)
+}
+
+// decodeBucketKey parses an encoded key ([0x00][varint level][varint bucketNumber]),
+// the inverse of getEncodedBytes.
+func decodeBucketKey(keyBytes []byte) bucketKey {
+	level, numBytesRead := proto.DecodeVarint(keyBytes[1:])
+	// Offset by 1 to skip the leading 0x00 prefix byte.
+	bucketNumber, _ := proto.DecodeVarint(keyBytes[numBytesRead+1:])
+	return bucketKey{int(level), int(bucketNumber)}
+}
+
+// getParentKey returns the key of this bucket's parent, one level up.
+func (bucketKey *bucketKey) getParentKey() *bucketKey {
+	return newBucketKey(bucketKey.level-1, conf.computeParentBucketNumber(bucketKey.bucketNumber))
+}
+
+// equals reports whether both level and bucketNumber match.
+func (bucketKey *bucketKey) equals(anotherBucketKey *bucketKey) bool {
+	return bucketKey.level == anotherBucketKey.level && bucketKey.bucketNumber == anotherBucketKey.bucketNumber
+}
+
+// getChildIndex returns childKey's 0-based position among this bucket's
+// children, panicking if childKey is not actually a child of this bucket.
+func (bucketKey *bucketKey) getChildIndex(childKey *bucketKey) int {
+	bucketNumberOfFirstChild := ((bucketKey.bucketNumber - 1) * conf.getMaxGroupingAtEachLevel()) + 1
+	bucketNumberOfLastChild := bucketKey.bucketNumber * conf.getMaxGroupingAtEachLevel()
+	if childKey.bucketNumber < bucketNumberOfFirstChild || childKey.bucketNumber > bucketNumberOfLastChild {
+		panic(fmt.Errorf("[%#v] is not a valid child bucket of [%#v]", childKey, bucketKey))
+	}
+	return childKey.bucketNumber - bucketNumberOfFirstChild
+}
+
+// getChildKey returns the key of the child at the given 0-based index,
+// the inverse of getChildIndex.
+func (bucketKey *bucketKey) getChildKey(index int) *bucketKey {
+	bucketNumberOfFirstChild := ((bucketKey.bucketNumber - 1) * conf.getMaxGroupingAtEachLevel()) + 1
+	bucketNumberOfChild := bucketNumberOfFirstChild + index
+	return newBucketKey(bucketKey.level+1, bucketNumberOfChild)
+}
+
+// getEncodedBytes serializes the key as [0x00][varint level][varint bucketNumber];
+// the 0x00 prefix distinguishes bucket-node keys in the state column family.
+func (bucketKey *bucketKey) getEncodedBytes() []byte {
+	encodedBytes := []byte{}
+	encodedBytes = append(encodedBytes, byte(0))
+	encodedBytes = append(encodedBytes, proto.EncodeVarint(uint64(bucketKey.level))...)
+	encodedBytes = append(encodedBytes, proto.EncodeVarint(uint64(bucketKey.bucketNumber))...)
+	return encodedBytes
+}
+
+// String returns a human-readable representation for logging.
+func (bucketKey *bucketKey) String() string {
+	return fmt.Sprintf("level=[%d], bucketNumber=[%d]", bucketKey.level, bucketKey.bucketNumber)
+}
+
+// clone returns an independent copy of this key (re-validated via newBucketKey).
+func (bucketKey *bucketKey) clone() *bucketKey {
+	return newBucketKey(bucketKey.level, bucketKey.bucketNumber)
+}
diff --git a/core/ledger/statemgmt/buckettree/bucket_key_test.go b/core/ledger/statemgmt/buckettree/bucket_key_test.go
new file mode 100644
index 00000000000..87854354167
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/bucket_key_test.go
@@ -0,0 +1,96 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+// TestBucketKeyGetParentKey checks parent computation for grouping factors 2
+// and 3 at representative bucket numbers.
+func TestBucketKeyGetParentKey(t *testing.T) {
+	conf = newConfig(26, 2, fnvHash)
+	bucketKey := newBucketKey(5, 24)
+	parentKey := bucketKey.getParentKey()
+	testutil.AssertEquals(t, parentKey.level, 4)
+	testutil.AssertEquals(t, parentKey.bucketNumber, 12)
+
+	bucketKey = newBucketKey(5, 25)
+	parentKey = bucketKey.getParentKey()
+	testutil.AssertEquals(t, parentKey.level, 4)
+	testutil.AssertEquals(t, parentKey.bucketNumber, 13)
+
+	conf = newConfig(26, 3, fnvHash)
+	bucketKey = newBucketKey(3, 24)
+	parentKey = bucketKey.getParentKey()
+	testutil.AssertEquals(t, parentKey.level, 2)
+	testutil.AssertEquals(t, parentKey.bucketNumber, 8)
+
+	bucketKey = newBucketKey(3, 25)
+	parentKey = bucketKey.getParentKey()
+	testutil.AssertEquals(t, parentKey.level, 2)
+	testutil.AssertEquals(t, parentKey.bucketNumber, 9)
+}
+
+// TestBucketKeyEqual checks equals for same, differing-level, differing-number,
+// and differing-both keys.
+func TestBucketKeyEqual(t *testing.T) {
+	conf = newConfig(26, 3, fnvHash)
+	bucketKey1 := newBucketKey(1, 2)
+	bucketKey2 := newBucketKey(1, 2)
+	testutil.AssertEquals(t, bucketKey1.equals(bucketKey2), true)
+	bucketKey2 = newBucketKey(2, 2)
+	testutil.AssertEquals(t, bucketKey1.equals(bucketKey2), false)
+	bucketKey2 = newBucketKey(1, 3)
+	testutil.AssertEquals(t, bucketKey1.equals(bucketKey2), false)
+	bucketKey2 = newBucketKey(2, 3)
+	testutil.AssertEquals(t, bucketKey1.equals(bucketKey2), false)
+}
+
+// TestBucketKeyWrongLevelCausePanic verifies that a level outside the valid
+// range [0, lowestLevel] causes newBucketKey to panic.
+func TestBucketKeyWrongLevelCausePanic(t *testing.T) {
+	conf = newConfig(26, 3, fnvHash)
+	defer testutil.AssertPanic(t, "A panic should occur when asking for a level beyond a valid range")
+	newBucketKey(4, 1)
+}
+
+// TestBucketKeyWrongBucketNumberCausePanic_1 verifies that a bucket number
+// above the valid range for its level causes newBucketKey to panic.
+func TestBucketKeyWrongBucketNumberCausePanic_1(t *testing.T) {
+	conf = newConfig(26, 3, fnvHash)
+	// Fixed copy-pasted assertion message: this test exercises an invalid
+	// bucket number, not an invalid level.
+	defer testutil.AssertPanic(t, "A panic should occur when asking for a bucket number beyond a valid range")
+	newBucketKey(1, 4)
+}
+
+// TestBucketKeyWrongBucketNumberCausePanic_2 verifies that a bucket number
+// above the valid range at the lowest level causes newBucketKey to panic.
+func TestBucketKeyWrongBucketNumberCausePanic_2(t *testing.T) {
+	conf = newConfig(26, 3, fnvHash)
+	defer testutil.AssertPanic(t, "A panic should occur when asking for a bucket number beyond a valid range")
+	newBucketKey(3, 27)
+}
+
+// TestBucketKeyWrongBucketNumberCausePanic_3 verifies that a bucket number
+// other than 1 at the root level causes newBucketKey to panic.
+func TestBucketKeyWrongBucketNumberCausePanic_3(t *testing.T) {
+	conf = newConfig(26, 3, fnvHash)
+	defer testutil.AssertPanic(t, "A panic should occur when asking for a bucket number beyond a valid range")
+	newBucketKey(0, 2)
+}
+
+// TestBucketKeyGetChildIndex checks that sibling buckets 22, 23, 24 map to
+// child indexes 0, 1, 2 under their common parent (grouping factor 3).
+func TestBucketKeyGetChildIndex(t *testing.T) {
+	conf = newConfig(26, 3, fnvHash)
+	bucketKey := newBucketKey(3, 22)
+	testutil.AssertEquals(t, bucketKey.getParentKey().getChildIndex(bucketKey), 0)
+
+	bucketKey = newBucketKey(3, 23)
+	testutil.AssertEquals(t, bucketKey.getParentKey().getChildIndex(bucketKey), 1)
+
+	bucketKey = newBucketKey(3, 24)
+	testutil.AssertEquals(t, bucketKey.getParentKey().getChildIndex(bucketKey), 2)
+}
diff --git a/core/ledger/statemgmt/buckettree/bucket_node.go b/core/ledger/statemgmt/buckettree/bucket_node.go
new file mode 100644
index 00000000000..7b1f5cea1db
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/bucket_node.go
@@ -0,0 +1,122 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+ openchainUtil "github.com/hyperledger/fabric/core/util"
+)
+
+// bucketNode is an internal node of the bucket tree. It holds one crypto-hash
+// slot per possible child (nil when absent), a parallel dirty-flag slice for
+// merging, and a flag marking the node for deletion when it has no children.
+type bucketNode struct {
+	bucketKey          *bucketKey
+	childrenCryptoHash [][]byte
+	childrenUpdated    []bool
+	markedForDeletion  bool
+}
+
+// newBucketNode returns an empty node sized for the configured maximum number
+// of children per bucket.
+func newBucketNode(bucketKey *bucketKey) *bucketNode {
+	maxChildren := conf.getMaxGroupingAtEachLevel()
+	return &bucketNode{bucketKey, make([][]byte, maxChildren), make([]bool, maxChildren), false}
+}
+
+// unmarshalBucketNode reconstructs a bucket node from bytes produced by
+// marshal: a fixed count (maxGroupingAtEachLevel) of length-prefixed child
+// hashes. A decode failure indicates corrupt data and panics.
+func unmarshalBucketNode(bucketKey *bucketKey, serializedBytes []byte) *bucketNode {
+	bucketNode := newBucketNode(bucketKey)
+	buffer := proto.NewBuffer(serializedBytes)
+	for i := 0; i < conf.getMaxGroupingAtEachLevel(); i++ {
+		childCryptoHash, err := buffer.DecodeRawBytes(false)
+		if err != nil {
+			panic(fmt.Errorf("this error should not occur: %s", err))
+		}
+		//protobuf's buffer.EncodeRawBytes/buffer.DecodeRawBytes convert a nil into a zero length byte-array, so nil check would not work
+		if len(childCryptoHash) != 0 {
+			bucketNode.childrenCryptoHash[i] = childCryptoHash
+		}
+	}
+	return bucketNode
+}
+
+// marshal serializes the node as maxGroupingAtEachLevel length-prefixed child
+// hashes (absent children serialize as zero-length entries).
+func (bucketNode *bucketNode) marshal() []byte {
+	buffer := proto.NewBuffer([]byte{})
+	for i := 0; i < conf.getMaxGroupingAtEachLevel(); i++ {
+		buffer.EncodeRawBytes(bucketNode.childrenCryptoHash[i])
+	}
+	return buffer.Bytes()
+}
+
+// setChildCryptoHash records cryptoHash for the given child and marks that
+// child slot as updated (used by mergeBucketNode to protect fresh values).
+func (bucketNode *bucketNode) setChildCryptoHash(childKey *bucketKey, cryptoHash []byte) {
+	i := bucketNode.bucketKey.getChildIndex(childKey)
+	bucketNode.childrenCryptoHash[i] = cryptoHash
+	bucketNode.childrenUpdated[i] = true
+}
+
+// mergeBucketNode copies child hashes from anotherBucketNode (e.g. the
+// persisted version) into slots that have NOT been updated in this node,
+// so fresh in-memory updates win. Panics if the keys differ.
+func (bucketNode *bucketNode) mergeBucketNode(anotherBucketNode *bucketNode) {
+	if !bucketNode.bucketKey.equals(anotherBucketNode.bucketKey) {
+		panic(fmt.Errorf("Nodes with different keys can not be merged. BaseKey=[%#v], MergeKey=[%#v]", bucketNode.bucketKey, anotherBucketNode.bucketKey))
+	}
+	for i, childCryptoHash := range anotherBucketNode.childrenCryptoHash {
+		if !bucketNode.childrenUpdated[i] {
+			bucketNode.childrenCryptoHash[i] = childCryptoHash
+		}
+	}
+}
+
+// computeCryptoHash derives this bucket's crypto-hash from its children:
+// nil when there are no children (also marking the node for deletion), the
+// single child's hash propagated as-is when there is exactly one, and the
+// hash of the concatenated child hashes otherwise.
+func (bucketNode *bucketNode) computeCryptoHash() []byte {
+	cryptoHashContent := []byte{}
+	numChildren := 0
+	for i, childCryptoHash := range bucketNode.childrenCryptoHash {
+		if childCryptoHash != nil {
+			numChildren++
+			logger.Debugf("Appending crypto-hash for child bucket = [%s]", bucketNode.bucketKey.getChildKey(i))
+			cryptoHashContent = append(cryptoHashContent, childCryptoHash...)
+		}
+	}
+	if numChildren == 0 {
+		// Fixed grammar in debug message ("has not children" -> "has no children").
+		logger.Debugf("Returning crypto-hash of bucket = [%s] - because, it has no children", bucketNode.bucketKey)
+		bucketNode.markedForDeletion = true
+		return nil
+	}
+	if numChildren == 1 {
+		logger.Debugf("Propagating crypto-hash of single child node for bucket = [%s]", bucketNode.bucketKey)
+		return cryptoHashContent
+	}
+	logger.Debugf("Computing crypto-hash for bucket [%s] by merging [%d] children", bucketNode.bucketKey, numChildren)
+	return openchainUtil.ComputeCryptoHash(cryptoHashContent)
+}
+
+// String renders the node for debugging: its key, the number of present
+// children, and each child's index and crypto-hash.
+func (bucketNode *bucketNode) String() string {
+	numChildren := 0
+	for i := range bucketNode.childrenCryptoHash {
+		if bucketNode.childrenCryptoHash[i] != nil {
+			numChildren++
+		}
+	}
+	str := fmt.Sprintf("bucketKey={%s}\n NumChildren={%d}\n", bucketNode.bucketKey, numChildren)
+	if numChildren == 0 {
+		return str
+	}
+
+	// Fixed typo in output: "Childern" -> "Children".
+	str = str + "Children crypto-hashes:\n"
+	for i := range bucketNode.childrenCryptoHash {
+		childCryptoHash := bucketNode.childrenCryptoHash[i]
+		if childCryptoHash != nil {
+			str = str + fmt.Sprintf("childNumber={%d}, cryptoHash={%x}\n", i, childCryptoHash)
+		}
+	}
+	return str
+}
diff --git a/core/ledger/statemgmt/buckettree/bucket_node_test.go b/core/ledger/statemgmt/buckettree/bucket_node_test.go
new file mode 100644
index 00000000000..f9e992a9797
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/bucket_node_test.go
@@ -0,0 +1,74 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+func TestBucketNodeComputeHash(t *testing.T) {
+ conf = newConfig(26, 3, fnvHash)
+ bucketNode := newBucketNode(newBucketKey(2, 7))
+ testutil.AssertEquals(t, bucketNode.computeCryptoHash(), nil)
+
+ childKey1 := newBucketKey(3, 19)
+ bucketNode.setChildCryptoHash(childKey1, []byte("cryptoHashChild1"))
+ testutil.AssertEquals(t, bucketNode.computeCryptoHash(), []byte("cryptoHashChild1"))
+
+ childKey3 := newBucketKey(3, 21)
+ bucketNode.setChildCryptoHash(childKey3, []byte("cryptoHashChild3"))
+ testutil.AssertEquals(t, bucketNode.computeCryptoHash(), testutil.ComputeCryptoHash([]byte("cryptoHashChild1cryptoHashChild3")))
+
+ childKey2 := newBucketKey(3, 20)
+ bucketNode.setChildCryptoHash(childKey2, []byte("cryptoHashChild2"))
+ testutil.AssertEquals(t, bucketNode.computeCryptoHash(), testutil.ComputeCryptoHash([]byte("cryptoHashChild1cryptoHashChild2cryptoHashChild3")))
+}
+
+func TestBucketNodeMerge(t *testing.T) {
+ conf = newConfig(26, 3, fnvHash)
+ bucketNode := newBucketNode(newBucketKey(2, 7))
+ bucketNode.childrenCryptoHash[0] = []byte("cryptoHashChild1")
+ bucketNode.childrenUpdated[0] = true
+ bucketNode.childrenCryptoHash[2] = []byte("cryptoHashChild3")
+ bucketNode.childrenUpdated[2] = true
+
+ dbBucketNode := newBucketNode(newBucketKey(2, 7))
+ dbBucketNode.childrenCryptoHash[0] = []byte("DBcryptoHashChild1")
+ dbBucketNode.childrenCryptoHash[1] = []byte("DBcryptoHashChild2")
+
+ bucketNode.mergeBucketNode(dbBucketNode)
+ testutil.AssertEquals(t, bucketNode.childrenCryptoHash[0], []byte("cryptoHashChild1"))
+ testutil.AssertEquals(t, bucketNode.childrenCryptoHash[1], []byte("DBcryptoHashChild2"))
+ testutil.AssertEquals(t, bucketNode.childrenCryptoHash[2], []byte("cryptoHashChild3"))
+}
+
+func TestBucketNodeMarshalUnmarshal(t *testing.T) {
+ conf = newConfig(26, 3, fnvHash)
+ bucketNode := newBucketNode(newBucketKey(2, 7))
+ childKey1 := newBucketKey(3, 19)
+ bucketNode.setChildCryptoHash(childKey1, []byte("cryptoHashChild1"))
+
+ childKey3 := newBucketKey(3, 21)
+ bucketNode.setChildCryptoHash(childKey3, []byte("cryptoHashChild3"))
+
+ serializedBytes := bucketNode.marshal()
+ deserializedBucketNode := unmarshalBucketNode(newBucketKey(2, 7), serializedBytes)
+ testutil.AssertEquals(t, bucketNode.bucketKey, deserializedBucketNode.bucketKey)
+ testutil.AssertEquals(t, bucketNode.childrenCryptoHash, deserializedBucketNode.childrenCryptoHash)
+}
diff --git a/core/ledger/statemgmt/buckettree/bucket_tree_delta.go b/core/ledger/statemgmt/buckettree/bucket_tree_delta.go
new file mode 100644
index 00000000000..f81350de106
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/bucket_tree_delta.go
@@ -0,0 +1,65 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+type byBucketNumber map[int]*bucketNode
+
+type bucketTreeDelta struct {
+ byLevel map[int]byBucketNumber
+}
+
+func newBucketTreeDelta() *bucketTreeDelta {
+ return &bucketTreeDelta{make(map[int]byBucketNumber)}
+}
+
+func (bucketTreeDelta *bucketTreeDelta) getOrCreateBucketNode(bucketKey *bucketKey) *bucketNode {
+ byBucketNumber := bucketTreeDelta.byLevel[bucketKey.level]
+ if byBucketNumber == nil {
+ byBucketNumber = make(map[int]*bucketNode)
+ bucketTreeDelta.byLevel[bucketKey.level] = byBucketNumber
+ }
+ bucketNode := byBucketNumber[bucketKey.bucketNumber]
+ if bucketNode == nil {
+ bucketNode = newBucketNode(bucketKey)
+ byBucketNumber[bucketKey.bucketNumber] = bucketNode
+ }
+ return bucketNode
+}
+
+func (bucketTreeDelta *bucketTreeDelta) isEmpty() bool {
+ return bucketTreeDelta.byLevel == nil || len(bucketTreeDelta.byLevel) == 0
+}
+
+func (bucketTreeDelta *bucketTreeDelta) getBucketNodesAt(level int) []*bucketNode {
+ bucketNodes := []*bucketNode{}
+ byBucketNumber := bucketTreeDelta.byLevel[level]
+ if byBucketNumber == nil {
+ return nil
+ }
+ for _, bucketNode := range byBucketNumber {
+ bucketNodes = append(bucketNodes, bucketNode)
+ }
+ return bucketNodes
+}
+
+func (bucketTreeDelta *bucketTreeDelta) getRootNode() *bucketNode {
+ bucketNodes := bucketTreeDelta.getBucketNodesAt(0)
+ if bucketNodes == nil || len(bucketNodes) == 0 {
+ panic("This method should be called after processing is completed (i.e., the root node has been created)")
+ }
+ return bucketNodes[0]
+}
diff --git a/core/ledger/statemgmt/buckettree/bucket_tree_delta_test.go b/core/ledger/statemgmt/buckettree/bucket_tree_delta_test.go
new file mode 100644
index 00000000000..90b10ca14ef
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/bucket_tree_delta_test.go
@@ -0,0 +1,57 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+func TestBucketTreeDeltaBasic(t *testing.T) {
+ conf = newConfig(26, 3, fnvHash)
+ bucketTreeDelta := newBucketTreeDelta()
+ b1 := bucketTreeDelta.getOrCreateBucketNode(newBucketKey(2, 1))
+ testutil.AssertSame(t, bucketTreeDelta.getOrCreateBucketNode(newBucketKey(2, 1)), b1)
+ b2 := bucketTreeDelta.getOrCreateBucketNode(newBucketKey(2, 2))
+ b3 := bucketTreeDelta.getOrCreateBucketNode(newBucketKey(2, 3))
+ testutil.AssertContainsAll(t, bucketTreeDelta.getBucketNodesAt(2), []*bucketNode{b1, b2, b3})
+
+ b4 := bucketTreeDelta.getOrCreateBucketNode(newBucketKey(1, 1))
+ b5 := bucketTreeDelta.getOrCreateBucketNode(newBucketKey(1, 2))
+ testutil.AssertContainsAll(t, bucketTreeDelta.getBucketNodesAt(1), []*bucketNode{b4, b5})
+
+ b6 := bucketTreeDelta.getOrCreateBucketNode(newBucketKey(0, 1))
+ testutil.AssertContainsAll(t, bucketTreeDelta.getBucketNodesAt(0), []*bucketNode{b6})
+ testutil.AssertContainsAll(t, bucketTreeDelta.getBucketNodesAt(1), []*bucketNode{b4, b5})
+ testutil.AssertContainsAll(t, bucketTreeDelta.getBucketNodesAt(2), []*bucketNode{b1, b2, b3})
+
+ testutil.AssertEquals(t, len(bucketTreeDelta.getBucketNodesAt(0)), 1)
+ testutil.AssertEquals(t, len(bucketTreeDelta.getBucketNodesAt(1)), 2)
+ testutil.AssertEquals(t, len(bucketTreeDelta.getBucketNodesAt(2)), 3)
+
+ testutil.AssertSame(t, bucketTreeDelta.getRootNode(), b6)
+}
+
+func TestBucketTreeDeltaGetRootWithoutProcessing(t *testing.T) {
+ conf = newConfig(26, 3, fnvHash)
+ bucketTreeDelta := newBucketTreeDelta()
+ bucketKey1 := newBucketKey(2, 1)
+ bucketTreeDelta.getOrCreateBucketNode(bucketKey1)
+ defer testutil.AssertPanic(t, "A panic should have occurred because the root node was requested without fully processing the bucket tree delta")
+ bucketTreeDelta.getRootNode()
+}
diff --git a/core/ledger/statemgmt/buckettree/config.go b/core/ledger/statemgmt/buckettree/config.go
new file mode 100644
index 00000000000..21f94eaebd6
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/config.go
@@ -0,0 +1,132 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "fmt"
+ "hash/fnv"
+)
+
+// ConfigNumBuckets - config name 'numBuckets' as it appears in yaml file
+const ConfigNumBuckets = "numBuckets"
+
+// ConfigMaxGroupingAtEachLevel - config name 'maxGroupingAtEachLevel' as it appears in yaml file
+const ConfigMaxGroupingAtEachLevel = "maxGroupingAtEachLevel"
+
+// ConfigHashFunction - config name 'hashFunction'. This is not exposed in yaml file. This configuration is used for testing with custom hash-function
+const ConfigHashFunction = "hashFunction"
+
+// DefaultNumBuckets - total buckets
+const DefaultNumBuckets = 10009
+
+// DefaultMaxGroupingAtEachLevel - Number of max buckets to group at each level.
+// Grouping is started from left. The last group may have less buckets
+const DefaultMaxGroupingAtEachLevel = 10
+
+var conf *config
+
+type config struct {
+ maxGroupingAtEachLevel int
+ lowestLevel int
+ levelToNumBucketsMap map[int]int
+ hashFunc hashFunc
+}
+
+func initConfig(configs map[string]interface{}) {
+ logger.Infof("configs passed during initialization = %#v", configs)
+
+ numBuckets, ok := configs[ConfigNumBuckets].(int)
+ if !ok {
+ numBuckets = DefaultNumBuckets
+ }
+
+ maxGroupingAtEachLevel, ok := configs[ConfigMaxGroupingAtEachLevel].(int)
+ if !ok {
+ maxGroupingAtEachLevel = DefaultMaxGroupingAtEachLevel
+ }
+
+ hashFunction, ok := configs[ConfigHashFunction].(hashFunc)
+ if !ok {
+ hashFunction = fnvHash
+ }
+ conf = newConfig(numBuckets, maxGroupingAtEachLevel, hashFunction)
+ logger.Infof("Initializing bucket tree state implementation with configurations %+v", conf)
+}
+
+func newConfig(numBuckets int, maxGroupingAtEachLevel int, hashFunc hashFunc) *config {
+ conf := &config{maxGroupingAtEachLevel, -1, make(map[int]int), hashFunc}
+ currentLevel := 0
+ numBucketAtCurrentLevel := numBuckets
+ levelInfoMap := make(map[int]int)
+ levelInfoMap[currentLevel] = numBucketAtCurrentLevel
+ for numBucketAtCurrentLevel > 1 {
+ numBucketAtParentLevel := numBucketAtCurrentLevel / maxGroupingAtEachLevel
+ if numBucketAtCurrentLevel%maxGroupingAtEachLevel != 0 {
+ numBucketAtParentLevel++
+ }
+
+ numBucketAtCurrentLevel = numBucketAtParentLevel
+ currentLevel++
+ levelInfoMap[currentLevel] = numBucketAtCurrentLevel
+ }
+
+ conf.lowestLevel = currentLevel
+ for k, v := range levelInfoMap {
+ conf.levelToNumBucketsMap[conf.lowestLevel-k] = v
+ }
+ return conf
+}
+
+func (config *config) getNumBuckets(level int) int {
+ if level < 0 || level > config.lowestLevel {
+ panic(fmt.Errorf("level can only be between 0 and [%d]", config.lowestLevel))
+ }
+ return config.levelToNumBucketsMap[level]
+}
+
+func (config *config) computeBucketHash(data []byte) uint32 {
+ return config.hashFunc(data)
+}
+
+func (config *config) getLowestLevel() int {
+ return config.lowestLevel
+}
+
+func (config *config) getMaxGroupingAtEachLevel() int {
+ return config.maxGroupingAtEachLevel
+}
+
+func (config *config) getNumBucketsAtLowestLevel() int {
+ return config.getNumBuckets(config.getLowestLevel())
+}
+
+func (config *config) computeParentBucketNumber(bucketNumber int) int {
+ logger.Debugf("Computing parent bucket number for bucketNumber [%d]", bucketNumber)
+ parentBucketNumber := bucketNumber / config.getMaxGroupingAtEachLevel()
+ if bucketNumber%config.getMaxGroupingAtEachLevel() != 0 {
+ parentBucketNumber++
+ }
+ return parentBucketNumber
+}
+
+type hashFunc func(data []byte) uint32
+
+func fnvHash(data []byte) uint32 {
+ fnvHash := fnv.New32a()
+ fnvHash.Write(data)
+ return fnvHash.Sum32()
+}
diff --git a/core/ledger/statemgmt/buckettree/config_test.go b/core/ledger/statemgmt/buckettree/config_test.go
new file mode 100644
index 00000000000..9ddef90455a
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/config_test.go
@@ -0,0 +1,61 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+ "github.com/spf13/viper"
+)
+
+func TestConfigInit(t *testing.T) {
+ configs := viper.GetStringMap("ledger.state.dataStructure.configs")
+ t.Logf("Configs loaded from yaml = %#v", configs)
+ testDBWrapper.CleanDB(t)
+ stateImpl := NewStateImpl()
+ stateImpl.Initialize(configs)
+ testutil.AssertEquals(t, conf.getNumBucketsAtLowestLevel(), configs[ConfigNumBuckets])
+ testutil.AssertEquals(t, conf.getMaxGroupingAtEachLevel(), configs[ConfigMaxGroupingAtEachLevel])
+}
+
+func TestConfig(t *testing.T) {
+ testConf := newConfig(26, 2, fnvHash)
+ t.Logf("conf.levelToNumBucketsMap: [%#v]", testConf.levelToNumBucketsMap)
+ testutil.AssertEquals(t, testConf.getLowestLevel(), 5)
+ testutil.AssertEquals(t, testConf.getNumBuckets(0), 1)
+ testutil.AssertEquals(t, testConf.getNumBuckets(1), 2)
+ testutil.AssertEquals(t, testConf.getNumBuckets(2), 4)
+ testutil.AssertEquals(t, testConf.getNumBuckets(3), 7)
+ testutil.AssertEquals(t, testConf.getNumBuckets(4), 13)
+ testutil.AssertEquals(t, testConf.getNumBuckets(5), 26)
+
+ testutil.AssertEquals(t, testConf.computeParentBucketNumber(25), 13)
+ testutil.AssertEquals(t, testConf.computeParentBucketNumber(9), 5)
+ testutil.AssertEquals(t, testConf.computeParentBucketNumber(10), 5)
+
+ testConf = newConfig(26, 3, fnvHash)
+ t.Logf("conf.levelToNumBucketsMap: [%#v]", testConf.levelToNumBucketsMap)
+ testutil.AssertEquals(t, testConf.getLowestLevel(), 3)
+ testutil.AssertEquals(t, testConf.getNumBuckets(0), 1)
+ testutil.AssertEquals(t, testConf.getNumBuckets(1), 3)
+ testutil.AssertEquals(t, testConf.getNumBuckets(2), 9)
+ testutil.AssertEquals(t, testConf.getNumBuckets(3), 26)
+
+ testutil.AssertEquals(t, testConf.computeParentBucketNumber(24), 8)
+ testutil.AssertEquals(t, testConf.computeParentBucketNumber(25), 9)
+}
diff --git a/core/ledger/statemgmt/buckettree/data_key.go b/core/ledger/statemgmt/buckettree/data_key.go
new file mode 100644
index 00000000000..1bd3539525c
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/data_key.go
@@ -0,0 +1,86 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "fmt"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/util"
+)
+
+type dataKey struct {
+ bucketKey *bucketKey
+ compositeKey []byte
+}
+
+func newDataKey(chaincodeID string, key string) *dataKey {
+ logger.Debugf("Enter - newDataKey. chaincodeID=[%s], key=[%s]", chaincodeID, key)
+ compositeKey := statemgmt.ConstructCompositeKey(chaincodeID, key)
+ bucketHash := conf.computeBucketHash(compositeKey)
+ // Adding one because - we start bucket-numbers 1 onwards
+ bucketNumber := int(bucketHash)%conf.getNumBucketsAtLowestLevel() + 1
+ dataKey := &dataKey{newBucketKeyAtLowestLevel(bucketNumber), compositeKey}
+ logger.Debugf("Exit - newDataKey=[%s]", dataKey)
+ return dataKey
+}
+
+func minimumPossibleDataKeyBytesFor(bucketKey *bucketKey) []byte {
+ min := encodeBucketNumber(bucketKey.bucketNumber)
+ min = append(min, byte(0))
+ return min
+}
+
+func minimumPossibleDataKeyBytes(bucketNumber int, chaincodeID string, key string) []byte {
+ b := encodeBucketNumber(bucketNumber)
+ b = append(b, statemgmt.ConstructCompositeKey(chaincodeID, key)...)
+ return b
+}
+
+func (key *dataKey) getBucketKey() *bucketKey {
+ return key.bucketKey
+}
+
+func encodeBucketNumber(bucketNumber int) []byte {
+ return util.EncodeOrderPreservingVarUint64(uint64(bucketNumber))
+}
+
+func decodeBucketNumber(encodedBytes []byte) (int, int) {
+ bucketNum, bytesConsumed := util.DecodeOrderPreservingVarUint64(encodedBytes)
+ return int(bucketNum), bytesConsumed
+}
+
+func (key *dataKey) getEncodedBytes() []byte {
+ encodedBytes := encodeBucketNumber(key.bucketKey.bucketNumber)
+ encodedBytes = append(encodedBytes, key.compositeKey...)
+ return encodedBytes
+}
+
+func newDataKeyFromEncodedBytes(encodedBytes []byte) *dataKey {
+ bucketNum, l := decodeBucketNumber(encodedBytes)
+ compositeKey := encodedBytes[l:]
+ return &dataKey{newBucketKeyAtLowestLevel(bucketNum), compositeKey}
+}
+
+func (key *dataKey) String() string {
+ return fmt.Sprintf("bucketKey=[%s], compositeKey=[%s]", key.bucketKey, string(key.compositeKey))
+}
+
+func (key *dataKey) clone() *dataKey {
+ clone := &dataKey{key.bucketKey.clone(), key.compositeKey}
+ return clone
+}
diff --git a/core/ledger/statemgmt/buckettree/data_key_test.go b/core/ledger/statemgmt/buckettree/data_key_test.go
new file mode 100644
index 00000000000..a956c3401fc
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/data_key_test.go
@@ -0,0 +1,39 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+func TestDataKey(t *testing.T) {
+ conf = newConfig(26, 3, fnvHash)
+ dataKey := newDataKey("chaincodeID", "key")
+ encodedBytes := dataKey.getEncodedBytes()
+ dataKeyFromEncodedBytes := newDataKeyFromEncodedBytes(encodedBytes)
+ testutil.AssertEquals(t, dataKey, dataKeyFromEncodedBytes)
+}
+
+func TestDataKeyGetBucketKey(t *testing.T) {
+ conf = newConfig(26, 3, fnvHash)
+ newDataKey("chaincodeID1", "key1").getBucketKey()
+ newDataKey("chaincodeID1", "key2").getBucketKey()
+ newDataKey("chaincodeID2", "key1").getBucketKey()
+ newDataKey("chaincodeID2", "key2").getBucketKey()
+}
diff --git a/core/ledger/statemgmt/buckettree/data_node.go b/core/ledger/statemgmt/buckettree/data_node.go
new file mode 100644
index 00000000000..a35bb5da770
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/data_node.go
@@ -0,0 +1,60 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "fmt"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+)
+
+type dataNode struct {
+ dataKey *dataKey
+ value []byte
+}
+
+func newDataNode(dataKey *dataKey, value []byte) *dataNode {
+ return &dataNode{dataKey, value}
+}
+
+func unmarshalDataNodeFromBytes(keyBytes []byte, valueBytes []byte) *dataNode {
+ return unmarshalDataNode(newDataKeyFromEncodedBytes(keyBytes), valueBytes)
+}
+
+func unmarshalDataNode(dataKey *dataKey, serializedBytes []byte) *dataNode {
+ return &dataNode{dataKey, serializedBytes}
+}
+
+func (dataNode *dataNode) getCompositeKey() []byte {
+ return dataNode.dataKey.compositeKey
+}
+
+func (dataNode *dataNode) isDelete() bool {
+ return dataNode.value == nil
+}
+
+func (dataNode *dataNode) getKeyElements() (string, string) {
+ return statemgmt.DecodeCompositeKey(dataNode.getCompositeKey())
+}
+
+func (dataNode *dataNode) getValue() []byte {
+ return dataNode.value
+}
+
+func (dataNode *dataNode) String() string {
+ return fmt.Sprintf("dataKey=[%s], value=[%s]", dataNode.dataKey, string(dataNode.value))
+}
diff --git a/core/ledger/statemgmt/buckettree/data_nodes_delta.go b/core/ledger/statemgmt/buckettree/data_nodes_delta.go
new file mode 100644
index 00000000000..e8f54918ca6
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/data_nodes_delta.go
@@ -0,0 +1,85 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "bytes"
+ "sort"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+)
+
+// Code for managing changes in data nodes
+type dataNodes []*dataNode
+
+func (dataNodes dataNodes) Len() int {
+ return len(dataNodes)
+}
+
+func (dataNodes dataNodes) Swap(i, j int) {
+ dataNodes[i], dataNodes[j] = dataNodes[j], dataNodes[i]
+}
+
+func (dataNodes dataNodes) Less(i, j int) bool {
+ return bytes.Compare(dataNodes[i].dataKey.compositeKey, dataNodes[j].dataKey.compositeKey) < 0
+}
+
+type dataNodesDelta struct {
+ byBucket map[bucketKey]dataNodes
+}
+
+func newDataNodesDelta(stateDelta *statemgmt.StateDelta) *dataNodesDelta {
+ dataNodesDelta := &dataNodesDelta{make(map[bucketKey]dataNodes)}
+ chaincodeIDs := stateDelta.GetUpdatedChaincodeIds(false)
+ for _, chaincodeID := range chaincodeIDs {
+ updates := stateDelta.GetUpdates(chaincodeID)
+ for key, updatedValue := range updates {
+ if stateDelta.RollBackwards {
+ dataNodesDelta.add(chaincodeID, key, updatedValue.GetPreviousValue())
+ } else {
+ dataNodesDelta.add(chaincodeID, key, updatedValue.GetValue())
+ }
+ }
+ }
+ for _, dataNodes := range dataNodesDelta.byBucket {
+ sort.Sort(dataNodes)
+ }
+ return dataNodesDelta
+}
+
+func (dataNodesDelta *dataNodesDelta) add(chaincodeID string, key string, value []byte) {
+ dataKey := newDataKey(chaincodeID, key)
+ bucketKey := dataKey.getBucketKey()
+ dataNode := newDataNode(dataKey, value)
+ logger.Debugf("Adding dataNode=[%s] against bucketKey=[%s]", dataNode, bucketKey)
+ dataNodesDelta.byBucket[*bucketKey] = append(dataNodesDelta.byBucket[*bucketKey], dataNode)
+}
+
+func (dataNodesDelta *dataNodesDelta) getAffectedBuckets() []*bucketKey {
+ changedBuckets := []*bucketKey{}
+ for bucketKey := range dataNodesDelta.byBucket {
+ copyOfBucketKey := bucketKey.clone()
+ logger.Debugf("Adding changed bucket [%s]", copyOfBucketKey)
+ changedBuckets = append(changedBuckets, copyOfBucketKey)
+ }
+ logger.Debugf("Changed buckets are = [%s]", changedBuckets)
+ return changedBuckets
+}
+
+func (dataNodesDelta *dataNodesDelta) getSortedDataNodesFor(bucketKey *bucketKey) dataNodes {
+ return dataNodesDelta.byBucket[*bucketKey]
+}
diff --git a/core/ledger/statemgmt/buckettree/data_nodes_delta_test.go b/core/ledger/statemgmt/buckettree/data_nodes_delta_test.go
new file mode 100644
index 00000000000..2b2c77b1122
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/data_nodes_delta_test.go
@@ -0,0 +1,56 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+func TestDataNodesSort(t *testing.T) {
+ dataNodes := dataNodes{}
+ dataNode1 := newDataNode(newDataKey("chaincodeID1", "key1"), []byte("value1_1"))
+ dataNode2 := newDataNode(newDataKey("chaincodeID1", "key2"), []byte("value1_2"))
+ dataNode3 := newDataNode(newDataKey("chaincodeID2", "key1"), []byte("value2_1"))
+ dataNode4 := newDataNode(newDataKey("chaincodeID2", "key2"), []byte("value2_2"))
+ dataNodes = append(dataNodes, []*dataNode{dataNode2, dataNode4, dataNode3, dataNode1}...)
+ sort.Sort(dataNodes)
+ testutil.AssertSame(t, dataNodes[0], dataNode1)
+ testutil.AssertSame(t, dataNodes[1], dataNode2)
+ testutil.AssertSame(t, dataNodes[2], dataNode3)
+ testutil.AssertSame(t, dataNodes[3], dataNode4)
+}
+
+func TestDataNodesDelta(t *testing.T) {
+ conf = newConfig(26, 3, fnvHash)
+ stateDelta := statemgmt.NewStateDelta()
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1_1"), nil)
+ stateDelta.Set("chaincodeID1", "key2", []byte("value1_2"), nil)
+ stateDelta.Set("chaincodeID2", "key1", []byte("value2_1"), nil)
+ stateDelta.Set("chaincodeID2", "key2", []byte("value2_2"), nil)
+
+ dataNodesDelta := newDataNodesDelta(stateDelta)
+ affectedBuckets := dataNodesDelta.getAffectedBuckets()
+ testutil.AssertContains(t, affectedBuckets, newDataKey("chaincodeID1", "key1").getBucketKey())
+ testutil.AssertContains(t, affectedBuckets, newDataKey("chaincodeID1", "key2").getBucketKey())
+ testutil.AssertContains(t, affectedBuckets, newDataKey("chaincodeID2", "key1").getBucketKey())
+ testutil.AssertContains(t, affectedBuckets, newDataKey("chaincodeID2", "key2").getBucketKey())
+
+}
diff --git a/core/ledger/statemgmt/buckettree/db_helper.go b/core/ledger/statemgmt/buckettree/db_helper.go
new file mode 100644
index 00000000000..bc2d3a4893b
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/db_helper.go
@@ -0,0 +1,87 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+)
+
+func fetchDataNodeFromDB(dataKey *dataKey) (*dataNode, error) {
+ openchainDB := db.GetDBHandle()
+ nodeBytes, err := openchainDB.GetFromStateCF(dataKey.getEncodedBytes())
+ if err != nil {
+ return nil, err
+ }
+ if nodeBytes == nil {
+ logger.Debug("nodeBytes from db is nil")
+ } else if len(nodeBytes) == 0 {
+ logger.Debug("nodeBytes from db is an empty array")
+ }
+ // key does not exist
+ if nodeBytes == nil {
+ return nil, nil
+ }
+ return unmarshalDataNode(dataKey, nodeBytes), nil
+}
+
+func fetchBucketNodeFromDB(bucketKey *bucketKey) (*bucketNode, error) {
+ openchainDB := db.GetDBHandle()
+ nodeBytes, err := openchainDB.GetFromStateCF(bucketKey.getEncodedBytes())
+ if err != nil {
+ return nil, err
+ }
+ if nodeBytes == nil {
+ return nil, nil
+ }
+ return unmarshalBucketNode(bucketKey, nodeBytes), nil
+}
+
+type rawKey []byte
+
+func fetchDataNodesFromDBFor(bucketKey *bucketKey) (dataNodes, error) {
+ logger.Debugf("Fetching from DB data nodes for bucket [%s]", bucketKey)
+ openchainDB := db.GetDBHandle()
+ itr := openchainDB.GetStateCFIterator()
+ defer itr.Close()
+ minimumDataKeyBytes := minimumPossibleDataKeyBytesFor(bucketKey)
+
+ var dataNodes dataNodes
+
+ itr.Seek(minimumDataKeyBytes)
+
+ for ; itr.Valid(); itr.Next() {
+
+ // making a copy of key-value bytes because the underlying key bytes are reused by itr.
+ // no need to free slices as iterator frees memory when closed.
+ keyBytes := statemgmt.Copy(itr.Key().Data())
+ valueBytes := statemgmt.Copy(itr.Value().Data())
+
+ dataKey := newDataKeyFromEncodedBytes(keyBytes)
+ logger.Debugf("Retrieved data key [%s] from DB for bucket [%s]", dataKey, bucketKey)
+ if !dataKey.getBucketKey().equals(bucketKey) {
+ logger.Debugf("Data key [%s] from DB does not belong to bucket = [%s]. Stopping further iteration and returning results [%v]", dataKey, bucketKey, dataNodes)
+ return dataNodes, nil
+ }
+ dataNode := unmarshalDataNode(dataKey, valueBytes)
+
+ logger.Debugf("Data node [%s] from DB belongs to bucket = [%s]. Including the key in results...", dataNode, bucketKey)
+ dataNodes = append(dataNodes, dataNode)
+ }
+ logger.Debugf("Returning results [%v]", dataNodes)
+ return dataNodes, nil
+}
diff --git a/core/ledger/statemgmt/buckettree/perf_test.go b/core/ledger/statemgmt/buckettree/perf_test.go
new file mode 100644
index 00000000000..dc0aca2da20
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/perf_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "flag"
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+ "github.com/op/go-logging"
+)
+
+func BenchmarkStateHash(b *testing.B) {
+ b.StopTimer()
+ b.Logf("testParams:%q", testParams)
+ flags := flag.NewFlagSet("testParams", flag.ExitOnError)
+ numBuckets := flags.Int("NumBuckets", 10009, "Number of buckets")
+ maxGroupingAtEachLevel := flags.Int("MaxGroupingAtEachLevel", 10, "max grouping at each level")
+ chaincodeIDPrefix := flags.String("ChaincodeIDPrefix", "cID", "The chaincodeID used in the generated workload will be ChaincodeIDPrefix_1, ChaincodeIDPrefix_2, and so on")
+ numChaincodes := flags.Int("NumChaincodes", 1, "Number of chaincodes to assume")
+ maxKeySuffix := flags.Int("MaxKeySuffix", 1, "the keys are appended with _1, _2,.. upto MaxKeySuffix")
+ numKeysToInsert := flags.Int("NumKeysToInsert", 1, "how many keys to insert in a single batch")
+ kvSize := flags.Int("KVSize", 1000, "size of the value")
+	debugMsgsOn := flags.Bool("DebugOn", false, "Turn on/off debug messages during benchmarking")
+ flags.Parse(testParams)
+
+ b.Logf(`Running test with params:
+		numBuckets=%d, maxGroupingAtEachLevel=%d, chaincodeIDPrefix=%s, numChaincodes=%d, maxKeySuffix=%d, numKeysToInsert=%d, valueSize=%d, debugMsgs=%t`,
+ *numBuckets, *maxGroupingAtEachLevel, *chaincodeIDPrefix, *numChaincodes, *maxKeySuffix, *numKeysToInsert, *kvSize, *debugMsgsOn)
+
+ if !*debugMsgsOn {
+ testutil.SetLogLevel(logging.ERROR, "statemgmt")
+ testutil.SetLogLevel(logging.ERROR, "buckettree")
+ testutil.SetLogLevel(logging.ERROR, "db")
+ }
+
+ stateImplTestWrapper := newStateImplTestWrapperWithCustomConfig(b, *numBuckets, *maxGroupingAtEachLevel)
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ delta := statemgmt.ConstructRandomStateDelta(b, *chaincodeIDPrefix, *numChaincodes, *maxKeySuffix, *numKeysToInsert, *kvSize)
+ b.StartTimer()
+ stateImplTestWrapper.prepareWorkingSet(delta)
+ stateImplTestWrapper.computeCryptoHash()
+ if i == b.N-1 {
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+ testDBWrapper.CloseDB(b)
+ }
+ }
+}
diff --git a/core/ledger/statemgmt/buckettree/pkg_test.go b/core/ledger/statemgmt/buckettree/pkg_test.go
new file mode 100644
index 00000000000..31d6d030f37
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/pkg_test.go
@@ -0,0 +1,198 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+ "github.com/tecbot/gorocksdb"
+)
+
+var testDBWrapper = db.NewTestDBWrapper()
+var testParams []string
+
+func TestMain(m *testing.M) {
+ testParams = testutil.ParseTestParams()
+ testutil.SetupTestConfig()
+ os.Exit(m.Run())
+}
+
+// testHasher is a hash function for testing.
+// It returns the hash for a key from pre-populated map
+type testHasher struct {
+ testHashFunctionInput map[string]uint32
+}
+
+func newTestHasher() *testHasher {
+ return &testHasher{make(map[string]uint32)}
+}
+
+func (testHasher *testHasher) populate(chaincodeID string, key string, hash uint32) {
+ testHasher.testHashFunctionInput[string(statemgmt.ConstructCompositeKey(chaincodeID, key))] = hash
+}
+
+func (testHasher *testHasher) getHashFunction() hashFunc {
+ return func(data []byte) uint32 {
+ key := string(data)
+ value, ok := testHasher.testHashFunctionInput[key]
+ if !ok {
+ panic(fmt.Sprintf("A test should add entry before looking up. Entry looked up = [%s]", key))
+ }
+ return value
+ }
+}
+
+type stateImplTestWrapper struct {
+ configMap map[string]interface{}
+ stateImpl *StateImpl
+ t testing.TB
+}
+
+func newStateImplTestWrapper(t testing.TB) *stateImplTestWrapper {
+ var configMap map[string]interface{}
+ stateImpl := NewStateImpl()
+ err := stateImpl.Initialize(configMap)
+	testutil.AssertNoError(t, err, "Error while constructing stateImpl")
+ return &stateImplTestWrapper{configMap, stateImpl, t}
+}
+
+func newStateImplTestWrapperWithCustomConfig(t testing.TB, numBuckets int, maxGroupingAtEachLevel int) *stateImplTestWrapper {
+ configMap := map[string]interface{}{ConfigNumBuckets: numBuckets, ConfigMaxGroupingAtEachLevel: maxGroupingAtEachLevel}
+ stateImpl := NewStateImpl()
+ err := stateImpl.Initialize(configMap)
+	testutil.AssertNoError(t, err, "Error while constructing stateImpl")
+ return &stateImplTestWrapper{configMap, stateImpl, t}
+}
+
+func createFreshDBAndInitTestStateImplWithCustomHasher(t testing.TB, numBuckets int, maxGroupingAtEachLevel int) (*testHasher, *stateImplTestWrapper, *statemgmt.StateDelta) {
+ testHasher := newTestHasher()
+ configMap := map[string]interface{}{
+ ConfigNumBuckets: numBuckets,
+ ConfigMaxGroupingAtEachLevel: maxGroupingAtEachLevel,
+ ConfigHashFunction: testHasher.getHashFunction(),
+ }
+
+ testDBWrapper.CleanDB(t)
+ stateImpl := NewStateImpl()
+ stateImpl.Initialize(configMap)
+ stateImplTestWrapper := &stateImplTestWrapper{configMap, stateImpl, t}
+ stateDelta := statemgmt.NewStateDelta()
+ return testHasher, stateImplTestWrapper, stateDelta
+}
+
+func (testWrapper *stateImplTestWrapper) constructNewStateImpl() {
+ stateImpl := NewStateImpl()
+ err := stateImpl.Initialize(testWrapper.configMap)
+ testutil.AssertNoError(testWrapper.t, err, "Error while constructing new state tree")
+ testWrapper.stateImpl = stateImpl
+}
+
+func (testWrapper *stateImplTestWrapper) get(chaincodeID string, key string) []byte {
+ value, err := testWrapper.stateImpl.Get(chaincodeID, key)
+ testutil.AssertNoError(testWrapper.t, err, "Error while getting value")
+ testWrapper.t.Logf("state value for chaincodeID,key=[%s,%s] = [%s], ", chaincodeID, key, string(value))
+ return value
+}
+
+func (testWrapper *stateImplTestWrapper) prepareWorkingSet(stateDelta *statemgmt.StateDelta) {
+ err := testWrapper.stateImpl.PrepareWorkingSet(stateDelta)
+ testutil.AssertNoError(testWrapper.t, err, "Error while PrepareWorkingSet")
+}
+
+func (testWrapper *stateImplTestWrapper) computeCryptoHash() []byte {
+ cryptoHash, err := testWrapper.stateImpl.ComputeCryptoHash()
+ testutil.AssertNoError(testWrapper.t, err, "Error while computing crypto hash")
+ return cryptoHash
+}
+
+func (testWrapper *stateImplTestWrapper) prepareWorkingSetAndComputeCryptoHash(stateDelta *statemgmt.StateDelta) []byte {
+ testWrapper.prepareWorkingSet(stateDelta)
+ return testWrapper.computeCryptoHash()
+}
+
+func (testWrapper *stateImplTestWrapper) addChangesForPersistence(writeBatch *gorocksdb.WriteBatch) {
+ err := testWrapper.stateImpl.AddChangesForPersistence(writeBatch)
+ testutil.AssertNoError(testWrapper.t, err, "Error while adding changes to db write-batch")
+}
+
+func (testWrapper *stateImplTestWrapper) persistChangesAndResetInMemoryChanges() {
+ writeBatch := gorocksdb.NewWriteBatch()
+ defer writeBatch.Destroy()
+ testWrapper.addChangesForPersistence(writeBatch)
+ testDBWrapper.WriteToDB(testWrapper.t, writeBatch)
+ testWrapper.stateImpl.ClearWorkingSet(true)
+}
+
+func (testWrapper *stateImplTestWrapper) getRangeScanIterator(chaincodeID string, startKey string, endKey string) statemgmt.RangeScanIterator {
+ itr, err := testWrapper.stateImpl.GetRangeScanIterator(chaincodeID, startKey, endKey)
+ testutil.AssertNoError(testWrapper.t, err, "Error while getting iterator")
+ return itr
+}
+
+func expectedBucketHashForTest(data ...[]string) []byte {
+ return testutil.ComputeCryptoHash(expectedBucketHashContentForTest(data...))
+}
+
+func expectedBucketHashContentForTest(data ...[]string) []byte {
+ expectedContent := []byte{}
+ for _, chaincodeData := range data {
+ expectedContent = append(expectedContent, encodeNumberForTest(len(chaincodeData[0]))...)
+ expectedContent = append(expectedContent, chaincodeData[0]...)
+ expectedContent = append(expectedContent, encodeNumberForTest((len(chaincodeData)-1)/2)...)
+ for i := 1; i < len(chaincodeData); i++ {
+ expectedContent = append(expectedContent, encodeNumberForTest(len(chaincodeData[i]))...)
+ expectedContent = append(expectedContent, chaincodeData[i]...)
+ }
+ }
+ return expectedContent
+}
+
+func encodeNumberForTest(i int) []byte {
+ return proto.EncodeVarint(uint64(i))
+}
+
+func TestExpectedBucketHashContentForTest(t *testing.T) {
+ expectedHashContent1 := expectedBucketHashContentForTest(
+ []string{"chaincodeID1", "key1", "value1"},
+ []string{"chaincodeID_2", "key_1", "value_1", "key_2", "value_2"},
+ []string{"chaincodeID3", "key1", "value1", "key2", "value2", "key3", "value3"},
+ )
+
+ expectedHashContent2 := testutil.AppendAll(
+ encodeNumberForTest(len("chaincodeID1")), []byte("chaincodeID1"),
+ encodeNumberForTest(1),
+ encodeNumberForTest(len("key1")), []byte("key1"), encodeNumberForTest(len("value1")), []byte("value1"),
+
+ encodeNumberForTest(len("chaincodeID_2")), []byte("chaincodeID_2"),
+ encodeNumberForTest(2),
+ encodeNumberForTest(len("key_1")), []byte("key_1"), encodeNumberForTest(len("value_1")), []byte("value_1"),
+ encodeNumberForTest(len("key_2")), []byte("key_2"), encodeNumberForTest(len("value_2")), []byte("value_2"),
+
+ encodeNumberForTest(len("chaincodeID3")), []byte("chaincodeID3"),
+ encodeNumberForTest(3),
+ encodeNumberForTest(len("key1")), []byte("key1"), encodeNumberForTest(len("value1")), []byte("value1"),
+ encodeNumberForTest(len("key2")), []byte("key2"), encodeNumberForTest(len("value2")), []byte("value2"),
+ encodeNumberForTest(len("key3")), []byte("key3"), encodeNumberForTest(len("value3")), []byte("value3"),
+ )
+ testutil.AssertEquals(t, expectedHashContent1, expectedHashContent2)
+}
diff --git a/core/ledger/statemgmt/buckettree/range_scan_iterator.go b/core/ledger/statemgmt/buckettree/range_scan_iterator.go
new file mode 100644
index 00000000000..dea4be9a12a
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/range_scan_iterator.go
@@ -0,0 +1,103 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/tecbot/gorocksdb"
+)
+
+// RangeScanIterator implements the interface 'statemgmt.RangeScanIterator'
+type RangeScanIterator struct {
+ dbItr *gorocksdb.Iterator
+ chaincodeID string
+ startKey string
+ endKey string
+ currentBucketNumber int
+ currentKey string
+ currentValue []byte
+ done bool
+}
+
+func newRangeScanIterator(chaincodeID string, startKey string, endKey string) (*RangeScanIterator, error) {
+ dbItr := db.GetDBHandle().GetStateCFIterator()
+ itr := &RangeScanIterator{
+ dbItr: dbItr,
+ chaincodeID: chaincodeID,
+ startKey: startKey,
+ endKey: endKey,
+ }
+ itr.seekForStartKeyWithinBucket(1)
+ return itr, nil
+}
+
+// Next - see interface 'statemgmt.RangeScanIterator' for details
+func (itr *RangeScanIterator) Next() bool {
+ if itr.done {
+ return false
+ }
+
+ for itr.dbItr.Valid() {
+
+ // making a copy of key-value bytes because, underlying key bytes are reused by itr.
+ // no need to free slices as iterator frees memory when closed.
+ keyBytes := statemgmt.Copy(itr.dbItr.Key().Data())
+ valueBytes := statemgmt.Copy(itr.dbItr.Value().Data())
+
+ dataNode := unmarshalDataNodeFromBytes(keyBytes, valueBytes)
+ dataKey := dataNode.dataKey
+ chaincodeID, key := statemgmt.DecodeCompositeKey(dataNode.getCompositeKey())
+ value := dataNode.value
+ logger.Debugf("Evaluating data-key = %s", dataKey)
+
+ bucketNumber := dataKey.bucketKey.bucketNumber
+ if bucketNumber > itr.currentBucketNumber {
+ itr.seekForStartKeyWithinBucket(bucketNumber)
+ continue
+ }
+
+ if chaincodeID == itr.chaincodeID && (itr.endKey == "" || key <= itr.endKey) {
+ logger.Debugf("including data-key = %s", dataKey)
+ itr.currentKey = key
+ itr.currentValue = value
+ itr.dbItr.Next()
+ return true
+ }
+
+ itr.seekForStartKeyWithinBucket(bucketNumber + 1)
+ continue
+ }
+ itr.done = true
+ return false
+}
+
+func (itr *RangeScanIterator) seekForStartKeyWithinBucket(bucketNumber int) {
+ itr.currentBucketNumber = bucketNumber
+ datakeyBytes := minimumPossibleDataKeyBytes(bucketNumber, itr.chaincodeID, itr.startKey)
+ itr.dbItr.Seek(datakeyBytes)
+}
+
+// GetKeyValue - see interface 'statemgmt.RangeScanIterator' for details
+func (itr *RangeScanIterator) GetKeyValue() (string, []byte) {
+ return itr.currentKey, itr.currentValue
+}
+
+// Close - see interface 'statemgmt.RangeScanIterator' for details
+func (itr *RangeScanIterator) Close() {
+ itr.dbItr.Close()
+}
diff --git a/core/ledger/statemgmt/buckettree/range_scan_iterator_test.go b/core/ledger/statemgmt/buckettree/range_scan_iterator_test.go
new file mode 100644
index 00000000000..c832f6b0dac
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/range_scan_iterator_test.go
@@ -0,0 +1,149 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+func TestRangeScanIterator(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ stateImplTestWrapper := newStateImplTestWrapper(t)
+ stateDelta := statemgmt.NewStateDelta()
+
+ // insert keys
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+
+ stateDelta.Set("chaincodeID2", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID2", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID2", "key3", []byte("value3"), nil)
+ stateDelta.Set("chaincodeID2", "key4", []byte("value4"), nil)
+ stateDelta.Set("chaincodeID2", "key5", []byte("value5"), nil)
+ stateDelta.Set("chaincodeID2", "key6", []byte("value6"), nil)
+ stateDelta.Set("chaincodeID2", "key7", []byte("value7"), nil)
+
+ stateDelta.Set("chaincodeID3", "key1", []byte("value1"), nil)
+
+ stateDelta.Set("chaincodeID4", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID4", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID4", "key3", []byte("value3"), nil)
+ stateDelta.Set("chaincodeID4", "key4", []byte("value4"), nil)
+ stateDelta.Set("chaincodeID4", "key5", []byte("value5"), nil)
+ stateDelta.Set("chaincodeID4", "key6", []byte("value6"), nil)
+ stateDelta.Set("chaincodeID4", "key7", []byte("value7"), nil)
+
+ stateDelta.Set("chaincodeID5", "key1", []byte("value5"), nil)
+ stateDelta.Set("chaincodeID6", "key1", []byte("value6"), nil)
+
+ stateImplTestWrapper.prepareWorkingSet(stateDelta)
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+
+ // test range scan for chaincodeID2
+ rangeScanItr := stateImplTestWrapper.getRangeScanIterator("chaincodeID2", "key2", "key5")
+
+ var results = make(map[string][]byte)
+ for rangeScanItr.Next() {
+ key, value := rangeScanItr.GetKeyValue()
+ results[key] = value
+ }
+ t.Logf("Results = %s", results)
+ testutil.AssertEquals(t, len(results), 4)
+ testutil.AssertEquals(t, results["key2"], []byte("value2"))
+ testutil.AssertEquals(t, results["key3"], []byte("value3"))
+ testutil.AssertEquals(t, results["key4"], []byte("value4"))
+ testutil.AssertEquals(t, results["key5"], []byte("value5"))
+ rangeScanItr.Close()
+
+	// test range scan for chaincodeID2 with a different key range
+ rangeScanItr = stateImplTestWrapper.getRangeScanIterator("chaincodeID2", "key3", "key6")
+ results = make(map[string][]byte)
+ for rangeScanItr.Next() {
+ key, value := rangeScanItr.GetKeyValue()
+ results[key] = value
+ }
+ t.Logf("Results = %s", results)
+ testutil.AssertEquals(t, len(results), 4)
+ testutil.AssertEquals(t, results["key3"], []byte("value3"))
+ testutil.AssertEquals(t, results["key4"], []byte("value4"))
+ testutil.AssertEquals(t, results["key5"], []byte("value5"))
+ testutil.AssertEquals(t, results["key6"], []byte("value6"))
+ rangeScanItr.Close()
+
+ // test range scan for chaincodeID2 starting from first key
+ rangeScanItr = stateImplTestWrapper.getRangeScanIterator("chaincodeID2", "", "key5")
+ results = make(map[string][]byte)
+ for rangeScanItr.Next() {
+ key, value := rangeScanItr.GetKeyValue()
+ results[key] = value
+ }
+ t.Logf("Results = %s", results)
+ testutil.AssertEquals(t, len(results), 5)
+ testutil.AssertEquals(t, results["key1"], []byte("value1"))
+ testutil.AssertEquals(t, results["key2"], []byte("value2"))
+ testutil.AssertEquals(t, results["key3"], []byte("value3"))
+ testutil.AssertEquals(t, results["key4"], []byte("value4"))
+ testutil.AssertEquals(t, results["key5"], []byte("value5"))
+ rangeScanItr.Close()
+
+ // test range scan for all the keys in chaincodeID2 starting from first key
+ rangeScanItr = stateImplTestWrapper.getRangeScanIterator("chaincodeID2", "", "")
+ results = make(map[string][]byte)
+ for rangeScanItr.Next() {
+ key, value := rangeScanItr.GetKeyValue()
+ results[key] = value
+ }
+ t.Logf("Results = %s", results)
+ testutil.AssertEquals(t, len(results), 7)
+ testutil.AssertEquals(t, results["key1"], []byte("value1"))
+ testutil.AssertEquals(t, results["key2"], []byte("value2"))
+ testutil.AssertEquals(t, results["key3"], []byte("value3"))
+ testutil.AssertEquals(t, results["key4"], []byte("value4"))
+ testutil.AssertEquals(t, results["key5"], []byte("value5"))
+ testutil.AssertEquals(t, results["key6"], []byte("value6"))
+ testutil.AssertEquals(t, results["key7"], []byte("value7"))
+ rangeScanItr.Close()
+}
+
+func TestRangeScanIteratorEmptyArray(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ stateImplTestWrapper := newStateImplTestWrapper(t)
+ stateDelta := statemgmt.NewStateDelta()
+
+ // insert keys
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID1", "key2", []byte{}, nil)
+ stateDelta.Set("chaincodeID1", "key3", []byte{}, nil)
+
+ stateImplTestWrapper.prepareWorkingSet(stateDelta)
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+
+	// test range scan for chaincodeID1
+ rangeScanItr := stateImplTestWrapper.getRangeScanIterator("chaincodeID1", "key1", "key3")
+
+ var results = make(map[string][]byte)
+ for rangeScanItr.Next() {
+ key, value := rangeScanItr.GetKeyValue()
+ results[key] = value
+ }
+ t.Logf("Results = %s", results)
+ testutil.AssertEquals(t, len(results), 3)
+ testutil.AssertEquals(t, results["key3"], []byte{})
+ rangeScanItr.Close()
+}
diff --git a/core/ledger/statemgmt/buckettree/snapshot_iterator.go b/core/ledger/statemgmt/buckettree/snapshot_iterator.go
new file mode 100644
index 00000000000..8004ab41e26
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/snapshot_iterator.go
@@ -0,0 +1,57 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/tecbot/gorocksdb"
+)
+
+// StateSnapshotIterator implements the interface 'statemgmt.StateSnapshotIterator'
+type StateSnapshotIterator struct {
+ dbItr *gorocksdb.Iterator
+}
+
+func newStateSnapshotIterator(snapshot *gorocksdb.Snapshot) (*StateSnapshotIterator, error) {
+ dbItr := db.GetDBHandle().GetStateCFSnapshotIterator(snapshot)
+ dbItr.Seek([]byte{0x01})
+ dbItr.Prev()
+ return &StateSnapshotIterator{dbItr}, nil
+}
+
+// Next - see interface 'statemgmt.StateSnapshotIterator' for details
+func (snapshotItr *StateSnapshotIterator) Next() bool {
+ snapshotItr.dbItr.Next()
+ return snapshotItr.dbItr.Valid()
+}
+
+// GetRawKeyValue - see interface 'statemgmt.StateSnapshotIterator' for details
+func (snapshotItr *StateSnapshotIterator) GetRawKeyValue() ([]byte, []byte) {
+
+ // making a copy of key-value bytes because, underlying key bytes are reused by itr.
+ // no need to free slices as iterator frees memory when closed.
+ keyBytes := statemgmt.Copy(snapshotItr.dbItr.Key().Data())
+ valueBytes := statemgmt.Copy(snapshotItr.dbItr.Value().Data())
+ dataNode := unmarshalDataNodeFromBytes(keyBytes, valueBytes)
+ return dataNode.getCompositeKey(), dataNode.getValue()
+}
+
+// Close - see interface 'statemgmt.StateSnapshotIterator' for details
+func (snapshotItr *StateSnapshotIterator) Close() {
+ snapshotItr.dbItr.Close()
+}
diff --git a/core/ledger/statemgmt/buckettree/snapshot_iterator_test.go b/core/ledger/statemgmt/buckettree/snapshot_iterator_test.go
new file mode 100644
index 00000000000..17b383d632e
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/snapshot_iterator_test.go
@@ -0,0 +1,68 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+func TestStateSnapshotIterator(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ stateImplTestWrapper := newStateImplTestWrapper(t)
+ stateDelta := statemgmt.NewStateDelta()
+
+ // insert keys
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID2", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID3", "key3", []byte("value3"), nil)
+ stateDelta.Set("chaincodeID4", "key4", []byte("value4"), nil)
+ stateDelta.Set("chaincodeID5", "key5", []byte("value5"), nil)
+ stateDelta.Set("chaincodeID6", "key6", []byte("value6"), nil)
+ stateImplTestWrapper.prepareWorkingSet(stateDelta)
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+ //check that the key is persisted
+ testutil.AssertEquals(t, stateImplTestWrapper.get("chaincodeID5", "key5"), []byte("value5"))
+
+	// take db snapshot
+ dbSnapshot := db.GetDBHandle().GetSnapshot()
+
+ // delete keys
+ stateDelta.Delete("chaincodeID1", "key1", nil)
+ stateDelta.Delete("chaincodeID2", "key2", nil)
+ stateDelta.Delete("chaincodeID3", "key3", nil)
+ stateDelta.Delete("chaincodeID4", "key4", nil)
+ stateDelta.Delete("chaincodeID5", "key5", nil)
+ stateDelta.Delete("chaincodeID6", "key6", nil)
+ stateImplTestWrapper.prepareWorkingSet(stateDelta)
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+ //check that the key is deleted
+ testutil.AssertNil(t, stateImplTestWrapper.get("chaincodeID5", "key5"))
+
+ itr, err := newStateSnapshotIterator(dbSnapshot)
+	testutil.AssertNoError(t, err, "Error while getting state snapshot iterator")
+ numKeys := 0
+ for itr.Next() {
+ key, value := itr.GetRawKeyValue()
+ t.Logf("key=[%s], value=[%s]", string(key), string(value))
+ numKeys++
+ }
+ testutil.AssertEquals(t, numKeys, 6)
+}
diff --git a/core/ledger/statemgmt/buckettree/state_impl.go b/core/ledger/statemgmt/buckettree/state_impl.go
new file mode 100644
index 00000000000..9aa0773cd8f
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/state_impl.go
@@ -0,0 +1,302 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "bytes"
+
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/op/go-logging"
+ "github.com/tecbot/gorocksdb"
+)
+
+var logger = logging.MustGetLogger("buckettree")
+
+// StateImpl - implements the interface - 'statemgmt.HashableState'
+type StateImpl struct {
+ dataNodesDelta *dataNodesDelta
+ bucketTreeDelta *bucketTreeDelta
+ persistedStateHash []byte
+ lastComputedCryptoHash []byte
+ recomputeCryptoHash bool
+ bucketCache *bucketCache
+}
+
+// NewStateImpl constructs a new StateImpl
+func NewStateImpl() *StateImpl {
+ return &StateImpl{}
+}
+
+// Initialize - method implementation for interface 'statemgmt.HashableState'
+func (stateImpl *StateImpl) Initialize(configs map[string]interface{}) error {
+ initConfig(configs)
+ rootBucketNode, err := fetchBucketNodeFromDB(constructRootBucketKey())
+ if err != nil {
+ return err
+ }
+ if rootBucketNode != nil {
+ stateImpl.persistedStateHash = rootBucketNode.computeCryptoHash()
+ stateImpl.lastComputedCryptoHash = stateImpl.persistedStateHash
+ }
+
+ bucketCacheMaxSize, ok := configs["bucketCacheSize"].(int)
+ if !ok {
+ bucketCacheMaxSize = defaultBucketCacheMaxSize
+ }
+ stateImpl.bucketCache = newBucketCache(bucketCacheMaxSize)
+ stateImpl.bucketCache.loadAllBucketNodesFromDB()
+ return nil
+}
+
+// Get - method implementation for interface 'statemgmt.HashableState'
+func (stateImpl *StateImpl) Get(chaincodeID string, key string) ([]byte, error) {
+ dataKey := newDataKey(chaincodeID, key)
+ dataNode, err := fetchDataNodeFromDB(dataKey)
+ if err != nil {
+ return nil, err
+ }
+ if dataNode == nil {
+ return nil, nil
+ }
+ return dataNode.value, nil
+}
+
+// PrepareWorkingSet - method implementation for interface 'statemgmt.HashableState'
+func (stateImpl *StateImpl) PrepareWorkingSet(stateDelta *statemgmt.StateDelta) error {
+ logger.Debug("Enter - PrepareWorkingSet()")
+ if stateDelta.IsEmpty() {
+ logger.Debug("Ignoring working-set as it is empty")
+ return nil
+ }
+ stateImpl.dataNodesDelta = newDataNodesDelta(stateDelta)
+ stateImpl.bucketTreeDelta = newBucketTreeDelta()
+ stateImpl.recomputeCryptoHash = true
+ return nil
+}
+
+// ClearWorkingSet - method implementation for interface 'statemgmt.HashableState'
+func (stateImpl *StateImpl) ClearWorkingSet(changesPersisted bool) {
+ logger.Debug("Enter - ClearWorkingSet()")
+ if changesPersisted {
+ stateImpl.persistedStateHash = stateImpl.lastComputedCryptoHash
+ stateImpl.updateBucketCache()
+ } else {
+ stateImpl.lastComputedCryptoHash = stateImpl.persistedStateHash
+ }
+ stateImpl.dataNodesDelta = nil
+ stateImpl.bucketTreeDelta = nil
+ stateImpl.recomputeCryptoHash = false
+}
+
+// ComputeCryptoHash - method implementation for interface 'statemgmt.HashableState'
+func (stateImpl *StateImpl) ComputeCryptoHash() ([]byte, error) {
+ logger.Debug("Enter - ComputeCryptoHash()")
+ if stateImpl.recomputeCryptoHash {
+ logger.Debug("Recomputing crypto-hash...")
+ err := stateImpl.processDataNodeDelta()
+ if err != nil {
+ return nil, err
+ }
+ err = stateImpl.processBucketTreeDelta()
+ if err != nil {
+ return nil, err
+ }
+ stateImpl.lastComputedCryptoHash = stateImpl.computeRootNodeCryptoHash()
+ stateImpl.recomputeCryptoHash = false
+ } else {
+		logger.Debug("Returning existing crypto-hash as recomputation not required")
+ }
+ return stateImpl.lastComputedCryptoHash, nil
+}
+
+func (stateImpl *StateImpl) processDataNodeDelta() error {
+	affectedBuckets := stateImpl.dataNodesDelta.getAffectedBuckets()
+	for _, bucketKey := range affectedBuckets {
+ updatedDataNodes := stateImpl.dataNodesDelta.getSortedDataNodesFor(bucketKey)
+ existingDataNodes, err := fetchDataNodesFromDBFor(bucketKey)
+ if err != nil {
+ return err
+ }
+ cryptoHashForBucket := computeDataNodesCryptoHash(bucketKey, updatedDataNodes, existingDataNodes)
+ logger.Debugf("Crypto-hash for lowest-level bucket [%s] is [%x]", bucketKey, cryptoHashForBucket)
+ parentBucket := stateImpl.bucketTreeDelta.getOrCreateBucketNode(bucketKey.getParentKey())
+ parentBucket.setChildCryptoHash(bucketKey, cryptoHashForBucket)
+ }
+ return nil
+}
+
+func (stateImpl *StateImpl) processBucketTreeDelta() error {
+ secondLastLevel := conf.getLowestLevel() - 1
+ for level := secondLastLevel; level >= 0; level-- {
+ bucketNodes := stateImpl.bucketTreeDelta.getBucketNodesAt(level)
+ logger.Debugf("Bucket tree delta. Number of buckets at level [%d] are [%d]", level, len(bucketNodes))
+ for _, bucketNode := range bucketNodes {
+ logger.Debugf("bucketNode in tree-delta [%s]", bucketNode)
+ dbBucketNode, err := stateImpl.bucketCache.get(*bucketNode.bucketKey)
+ logger.Debugf("bucket node from db [%s]", dbBucketNode)
+ if err != nil {
+ return err
+ }
+ if dbBucketNode != nil {
+ bucketNode.mergeBucketNode(dbBucketNode)
+ logger.Debugf("After merge... bucketNode in tree-delta [%s]", bucketNode)
+ }
+ if level == 0 {
+ return nil
+ }
+ logger.Debugf("Computing cryptoHash for bucket [%s]", bucketNode)
+ cryptoHash := bucketNode.computeCryptoHash()
+ logger.Debugf("cryptoHash for bucket [%s] is [%x]", bucketNode, cryptoHash)
+ parentBucket := stateImpl.bucketTreeDelta.getOrCreateBucketNode(bucketNode.bucketKey.getParentKey())
+ parentBucket.setChildCryptoHash(bucketNode.bucketKey, cryptoHash)
+ }
+ }
+ return nil
+}
+
+func (stateImpl *StateImpl) computeRootNodeCryptoHash() []byte {
+ return stateImpl.bucketTreeDelta.getRootNode().computeCryptoHash()
+}
+
+func computeDataNodesCryptoHash(bucketKey *bucketKey, updatedNodes dataNodes, existingNodes dataNodes) []byte {
+ logger.Debugf("Computing crypto-hash for bucket [%s]. numUpdatedNodes=[%d], numExistingNodes=[%d]", bucketKey, len(updatedNodes), len(existingNodes))
+ bucketHashCalculator := newBucketHashCalculator(bucketKey)
+ i := 0
+ j := 0
+ for i < len(updatedNodes) && j < len(existingNodes) {
+ updatedNode := updatedNodes[i]
+ existingNode := existingNodes[j]
+ c := bytes.Compare(updatedNode.dataKey.compositeKey, existingNode.dataKey.compositeKey)
+ var nextNode *dataNode
+ switch c {
+ case -1:
+ nextNode = updatedNode
+ i++
+ case 0:
+ nextNode = updatedNode
+ i++
+ j++
+ case 1:
+ nextNode = existingNode
+ j++
+ }
+ if !nextNode.isDelete() {
+ bucketHashCalculator.addNextNode(nextNode)
+ }
+ }
+
+ var remainingNodes dataNodes
+ if i < len(updatedNodes) {
+ remainingNodes = updatedNodes[i:]
+ } else if j < len(existingNodes) {
+ remainingNodes = existingNodes[j:]
+ }
+
+ for _, remainingNode := range remainingNodes {
+ if !remainingNode.isDelete() {
+ bucketHashCalculator.addNextNode(remainingNode)
+ }
+ }
+ return bucketHashCalculator.computeCryptoHash()
+}
+
+// AddChangesForPersistence - method implementation for interface 'statemgmt.HashableState'
+func (stateImpl *StateImpl) AddChangesForPersistence(writeBatch *gorocksdb.WriteBatch) error {
+
+ if stateImpl.dataNodesDelta == nil {
+ return nil
+ }
+
+ if stateImpl.recomputeCryptoHash {
+ _, err := stateImpl.ComputeCryptoHash()
+ if err != nil {
+ return err
+ }
+ }
+ stateImpl.addDataNodeChangesForPersistence(writeBatch)
+ stateImpl.addBucketNodeChangesForPersistence(writeBatch)
+ return nil
+}
+
+func (stateImpl *StateImpl) addDataNodeChangesForPersistence(writeBatch *gorocksdb.WriteBatch) {
+ openchainDB := db.GetDBHandle()
+ affectedBuckets := stateImpl.dataNodesDelta.getAffectedBuckets()
+ for _, affectedBucket := range affectedBuckets {
+ dataNodes := stateImpl.dataNodesDelta.getSortedDataNodesFor(affectedBucket)
+ for _, dataNode := range dataNodes {
+ if dataNode.isDelete() {
+ logger.Debugf("Deleting data node key = %#v", dataNode.dataKey)
+ writeBatch.DeleteCF(openchainDB.StateCF, dataNode.dataKey.getEncodedBytes())
+ } else {
+ logger.Debugf("Adding data node with value = %#v", dataNode.value)
+ writeBatch.PutCF(openchainDB.StateCF, dataNode.dataKey.getEncodedBytes(), dataNode.value)
+ }
+ }
+ }
+}
+
+func (stateImpl *StateImpl) addBucketNodeChangesForPersistence(writeBatch *gorocksdb.WriteBatch) {
+ openchainDB := db.GetDBHandle()
+ secondLastLevel := conf.getLowestLevel() - 1
+ for level := secondLastLevel; level >= 0; level-- {
+ bucketNodes := stateImpl.bucketTreeDelta.getBucketNodesAt(level)
+ for _, bucketNode := range bucketNodes {
+ if bucketNode.markedForDeletion {
+ writeBatch.DeleteCF(openchainDB.StateCF, bucketNode.bucketKey.getEncodedBytes())
+ } else {
+ writeBatch.PutCF(openchainDB.StateCF, bucketNode.bucketKey.getEncodedBytes(), bucketNode.marshal())
+ }
+ }
+ }
+}
+
+func (stateImpl *StateImpl) updateBucketCache() {
+ if stateImpl.bucketTreeDelta == nil || stateImpl.bucketTreeDelta.isEmpty() {
+ return
+ }
+ stateImpl.bucketCache.lock.Lock()
+ defer stateImpl.bucketCache.lock.Unlock()
+ secondLastLevel := conf.getLowestLevel() - 1
+ for level := 0; level <= secondLastLevel; level++ {
+ bucketNodes := stateImpl.bucketTreeDelta.getBucketNodesAt(level)
+ for _, bucketNode := range bucketNodes {
+ key := *bucketNode.bucketKey
+ if bucketNode.markedForDeletion {
+ stateImpl.bucketCache.removeWithoutLock(key)
+ } else {
+ stateImpl.bucketCache.putWithoutLock(key, bucketNode)
+ }
+ }
+ }
+}
+
+// PerfHintKeyChanged - method implementation for interface 'statemgmt.HashableState'
+func (stateImpl *StateImpl) PerfHintKeyChanged(chaincodeID string, key string) {
+ // We can create a cache. Pull all the keys for the bucket (to which given key belongs) in a separate thread
+ // This prefetching can help making method 'ComputeCryptoHash' faster.
+}
+
+// GetStateSnapshotIterator - method implementation for interface 'statemgmt.HashableState'
+func (stateImpl *StateImpl) GetStateSnapshotIterator(snapshot *gorocksdb.Snapshot) (statemgmt.StateSnapshotIterator, error) {
+ return newStateSnapshotIterator(snapshot)
+}
+
+// GetRangeScanIterator - method implementation for interface 'statemgmt.HashableState'
+func (stateImpl *StateImpl) GetRangeScanIterator(chaincodeID string, startKey string, endKey string) (statemgmt.RangeScanIterator, error) {
+ return newRangeScanIterator(chaincodeID, startKey, endKey)
+}
diff --git a/core/ledger/statemgmt/buckettree/state_impl_test.go b/core/ledger/statemgmt/buckettree/state_impl_test.go
new file mode 100644
index 00000000000..038e02b3430
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/state_impl_test.go
@@ -0,0 +1,400 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buckettree
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+func TestStateImpl_ComputeHash_AllInMemory_NoContents(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ stateImplTestWrapper := newStateImplTestWrapper(t)
+ hash := stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(statemgmt.NewStateDelta())
+ testutil.AssertEquals(t, hash, nil)
+}
+
+func TestStateImpl_ComputeHash_AllInMemory_1(t *testing.T) {
+ // number of buckets at each level 26,9,3,1
+ testHasher, stateImplTestWrapper, stateDelta := createFreshDBAndInitTestStateImplWithCustomHasher(t, 26, 3)
+ testHasher.populate("chaincodeID1", "key1", 0)
+ testHasher.populate("chaincodeID2", "key2", 0)
+ testHasher.populate("chaincodeID3", "key3", 0)
+ testHasher.populate("chaincodeID4", "key4", 3)
+
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID2", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID3", "key3", []byte("value3"), nil)
+ stateDelta.Set("chaincodeID4", "key4", []byte("value4"), nil)
+
+ rootHash := stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(stateDelta)
+
+ expectedHashBucket3_1 := expectedBucketHashForTest(
+ []string{"chaincodeID1", "key1", "value1"},
+ []string{"chaincodeID2", "key2", "value2"},
+ []string{"chaincodeID3", "key3", "value3"},
+ )
+ expectedHashBucket3_4 := expectedBucketHashForTest(
+ []string{"chaincodeID4", "key4", "value4"},
+ )
+ expectedHash := testutil.ComputeCryptoHash(expectedHashBucket3_1, expectedHashBucket3_4)
+ testutil.AssertEquals(t, rootHash, expectedHash)
+}
+
+func TestStateImpl_ComputeHash_AllInMemory_2(t *testing.T) {
+ // number of buckets at each level 26,13,7,4,2,1
+ testHasher, stateImplTestWrapper, stateDelta := createFreshDBAndInitTestStateImplWithCustomHasher(t, 26, 2)
+ // first two buckets - meet at next level
+ testHasher.populate("chaincodeID1", "key1", 0)
+ testHasher.populate("chaincodeID2", "key2", 1)
+
+ // middle two buckets
+ testHasher.populate("chaincodeID3", "key3", 5)
+ testHasher.populate("chaincodeID4", "key4", 9)
+
+ // last two buckets - meet at next level
+ testHasher.populate("chaincodeID5", "key5", 24)
+ testHasher.populate("chaincodeID6", "key6", 25)
+
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID2", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID3", "key3", []byte("value3"), nil)
+ stateDelta.Set("chaincodeID4", "key4", []byte("value4"), nil)
+ stateDelta.Set("chaincodeID5", "key5", []byte("value5"), nil)
+ stateDelta.Set("chaincodeID6", "key6", []byte("value6"), nil)
+
+ rootHash := stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(stateDelta)
+
+ expectedHashBucket5_1 := expectedBucketHashForTest([]string{"chaincodeID1", "key1", "value1"})
+ expectedHashBucket5_2 := expectedBucketHashForTest([]string{"chaincodeID2", "key2", "value2"})
+ expectedHashBucket5_6 := expectedBucketHashForTest([]string{"chaincodeID3", "key3", "value3"})
+ expectedHashBucket5_10 := expectedBucketHashForTest([]string{"chaincodeID4", "key4", "value4"})
+ expectedHashBucket5_25 := expectedBucketHashForTest([]string{"chaincodeID5", "key5", "value5"})
+ expectedHashBucket5_26 := expectedBucketHashForTest([]string{"chaincodeID6", "key6", "value6"})
+
+ expectedHashBucket4_1 := testutil.ComputeCryptoHash(expectedHashBucket5_1, expectedHashBucket5_2)
+ expectedHashBucket4_13 := testutil.ComputeCryptoHash(expectedHashBucket5_25, expectedHashBucket5_26)
+
+ expectedHashBucket2_1 := testutil.ComputeCryptoHash(expectedHashBucket4_1, expectedHashBucket5_6)
+
+ expectedHashBucket1_1 := testutil.ComputeCryptoHash(expectedHashBucket2_1, expectedHashBucket5_10)
+
+ expectedHash := testutil.ComputeCryptoHash(expectedHashBucket1_1, expectedHashBucket4_13)
+ testutil.AssertEquals(t, rootHash, expectedHash)
+}
+
+func TestStateImpl_ComputeHash_DB_1(t *testing.T) {
+ // number of buckets at each level 26,9,3,1
+ testHasher, stateImplTestWrapper, stateDelta := createFreshDBAndInitTestStateImplWithCustomHasher(t, 26, 3)
+ // populate hash function such that
+ // all keys belong to a single bucket so as to test overwrite/delete scenario
+ testHasher.populate("chaincodeID1", "key1", 3)
+ testHasher.populate("chaincodeID2", "key2", 3)
+ testHasher.populate("chaincodeID3", "key3", 3)
+ testHasher.populate("chaincodeID4", "key4", 3)
+ testHasher.populate("chaincodeID5", "key5", 3)
+ testHasher.populate("chaincodeID6", "key6", 3)
+ testHasher.populate("chaincodeID7", "key7", 3)
+
+ stateDelta.Set("chaincodeID2", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID3", "key3", []byte("value3"), nil)
+ stateDelta.Set("chaincodeID5", "key5", []byte("value5"), nil)
+ stateDelta.Set("chaincodeID6", "key6", []byte("value6"), nil)
+ rootHash := stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(stateDelta)
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+
+ expectedHash1 := expectedBucketHashForTest(
+ []string{"chaincodeID2", "key2", "value2"},
+ []string{"chaincodeID3", "key3", "value3"},
+ []string{"chaincodeID5", "key5", "value5"},
+ []string{"chaincodeID6", "key6", "value6"},
+ )
+ testutil.AssertEquals(t, rootHash, expectedHash1)
+
+ // modify boundary keys and a middle key
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Set("chaincodeID2", "key2", []byte("value2_new"), nil)
+ stateDelta.Set("chaincodeID3", "key3", []byte("value3_new"), nil)
+ stateDelta.Set("chaincodeID6", "key6", []byte("value6_new"), nil)
+ rootHash = stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(stateDelta)
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+ expectedHash2 := expectedBucketHashForTest(
+ []string{"chaincodeID2", "key2", "value2_new"},
+ []string{"chaincodeID3", "key3", "value3_new"},
+ []string{"chaincodeID5", "key5", "value5"},
+ []string{"chaincodeID6", "key6", "value6_new"},
+ )
+ testutil.AssertEquals(t, rootHash, expectedHash2)
+ testutil.AssertEquals(t, stateImplTestWrapper.get("chaincodeID2", "key2"), []byte("value2_new"))
+ testutil.AssertEquals(t, stateImplTestWrapper.get("chaincodeID3", "key3"), []byte("value3_new"))
+ testutil.AssertEquals(t, stateImplTestWrapper.get("chaincodeID6", "key6"), []byte("value6_new"))
+
+ // insert keys at boundary and in the middle
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID4", "key4", []byte("value4"), nil)
+ stateDelta.Set("chaincodeID7", "key7", []byte("value7"), nil)
+ rootHash = stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(stateDelta)
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+ expectedHash3 := expectedBucketHashForTest(
+ []string{"chaincodeID1", "key1", "value1"},
+ []string{"chaincodeID2", "key2", "value2_new"},
+ []string{"chaincodeID3", "key3", "value3_new"},
+ []string{"chaincodeID4", "key4", "value4"},
+ []string{"chaincodeID5", "key5", "value5"},
+ []string{"chaincodeID6", "key6", "value6_new"},
+ []string{"chaincodeID7", "key7", "value7"},
+ )
+ testutil.AssertEquals(t, rootHash, expectedHash3)
+ testutil.AssertEquals(t, stateImplTestWrapper.get("chaincodeID1", "key1"), []byte("value1"))
+ testutil.AssertEquals(t, stateImplTestWrapper.get("chaincodeID4", "key4"), []byte("value4"))
+ testutil.AssertEquals(t, stateImplTestWrapper.get("chaincodeID7", "key7"), []byte("value7"))
+
+ // delete keys at a boundary and in the middle
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Delete("chaincodeID1", "key1", nil)
+ stateDelta.Delete("chaincodeID4", "key4", nil)
+ stateDelta.Delete("chaincodeID7", "key7", nil)
+ rootHash = stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(stateDelta)
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+ testutil.AssertEquals(t, rootHash, expectedHash2)
+ testutil.AssertNil(t, stateImplTestWrapper.get("chaincodeID1", "key1"))
+ testutil.AssertNil(t, stateImplTestWrapper.get("chaincodeID4", "key4"))
+ testutil.AssertNil(t, stateImplTestWrapper.get("chaincodeID7", "key7"))
+}
+
+func TestStateImpl_ComputeHash_DB_2(t *testing.T) {
+ // number of buckets at each level 26,13,7,4,2,1
+ testHasher, stateImplTestWrapper, stateDelta := createFreshDBAndInitTestStateImplWithCustomHasher(t, 26, 2)
+ testHasher.populate("chaincodeID1", "key1", 0)
+ testHasher.populate("chaincodeID2", "key2", 1)
+ testHasher.populate("chaincodeID3", "key3", 5)
+ testHasher.populate("chaincodeID4", "key4", 9)
+ testHasher.populate("chaincodeID5", "key5", 24)
+ testHasher.populate("chaincodeID6", "key6", 25)
+
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID2", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID3", "key3", []byte("value3"), nil)
+ stateDelta.Set("chaincodeID4", "key4", []byte("value4"), nil)
+ stateDelta.Set("chaincodeID5", "key5", []byte("value5"), nil)
+ stateDelta.Set("chaincodeID6", "key6", []byte("value6"), nil)
+ stateImplTestWrapper.prepareWorkingSet(stateDelta)
+ // Populate DB
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+
+ //////////// Test - constructing a new state tree simulates starting state tree when db already has some data ////////
+ stateImplTestWrapper.constructNewStateImpl()
+ rootHash := stateImplTestWrapper.computeCryptoHash()
+
+ /*************************** bucket-tree-structure ***************
+ 1 1 1 1 1 1
+ 2 1 1 1 1 1
+ 6 3 2 1 1 1
+ 10 5 3 2 1 1
+ 25 13 7 4 2 1
+ 26 13 7 4 2 1
+ *******************************************************************/
+ expectedHashBucket5_1 := expectedBucketHashForTest([]string{"chaincodeID1", "key1", "value1"})
+ expectedHashBucket5_2 := expectedBucketHashForTest([]string{"chaincodeID2", "key2", "value2"})
+ expectedHashBucket5_6 := expectedBucketHashForTest([]string{"chaincodeID3", "key3", "value3"})
+ expectedHashBucket5_10 := expectedBucketHashForTest([]string{"chaincodeID4", "key4", "value4"})
+ expectedHashBucket5_25 := expectedBucketHashForTest([]string{"chaincodeID5", "key5", "value5"})
+ expectedHashBucket5_26 := expectedBucketHashForTest([]string{"chaincodeID6", "key6", "value6"})
+ expectedHashBucket4_1 := testutil.ComputeCryptoHash(expectedHashBucket5_1, expectedHashBucket5_2)
+ expectedHashBucket4_13 := testutil.ComputeCryptoHash(expectedHashBucket5_25, expectedHashBucket5_26)
+ expectedHashBucket2_1 := testutil.ComputeCryptoHash(expectedHashBucket4_1, expectedHashBucket5_6)
+ expectedHashBucket1_1 := testutil.ComputeCryptoHash(expectedHashBucket2_1, expectedHashBucket5_10)
+ expectedHash := testutil.ComputeCryptoHash(expectedHashBucket1_1, expectedHashBucket4_13)
+ testutil.AssertEquals(t, rootHash, expectedHash)
+
+ ////////////// Test - Add a few more keys (include keys in the existing buckets and new buckets) /////////////////////
+ stateDelta = statemgmt.NewStateDelta()
+ testHasher.populate("chaincodeID7", "key7", 1)
+ testHasher.populate("chaincodeID8", "key8", 7)
+ testHasher.populate("chaincodeID9", "key9", 9)
+ testHasher.populate("chaincodeID10", "key10", 20)
+
+ stateDelta.Set("chaincodeID7", "key7", []byte("value7"), nil)
+ stateDelta.Set("chaincodeID8", "key8", []byte("value8"), nil)
+ stateDelta.Set("chaincodeID9", "key9", []byte("value9"), nil)
+ stateDelta.Set("chaincodeID10", "key10", []byte("value10"), nil)
+
+ /*************************** bucket-tree-structure after adding keys ***************
+ 1 1 1 1 1 1
+ 2 1 1 1 1 1
+ 6 3 2 1 1 1
+ 8 4 2 1 1 1
+ 10 5 3 2 1 1
+ 21 11 6 3 2 1
+ 25 13 7 4 2 1
+ 26 13 7 4 2 1
+ ***********************************************************************************/
+ rootHash = stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(stateDelta)
+ expectedHashBucket5_2 = expectedBucketHashForTest(
+ []string{"chaincodeID2", "key2", "value2"},
+ []string{"chaincodeID7", "key7", "value7"},
+ )
+ expectedHashBucket5_8 := expectedBucketHashForTest([]string{"chaincodeID8", "key8", "value8"})
+ expectedHashBucket5_10 = expectedBucketHashForTest([]string{"chaincodeID4", "key4", "value4"},
+ []string{"chaincodeID9", "key9", "value9"})
+ expectedHashBucket5_21 := expectedBucketHashForTest([]string{"chaincodeID10", "key10", "value10"})
+ expectedHashBucket4_1 = testutil.ComputeCryptoHash(expectedHashBucket5_1, expectedHashBucket5_2)
+ expectedHashBucket3_2 := testutil.ComputeCryptoHash(expectedHashBucket5_6, expectedHashBucket5_8)
+ expectedHashBucket2_1 = testutil.ComputeCryptoHash(expectedHashBucket4_1, expectedHashBucket3_2)
+
+ expectedHashBucket1_1 = testutil.ComputeCryptoHash(expectedHashBucket2_1, expectedHashBucket5_10)
+ expectedHashBucket1_2 := testutil.ComputeCryptoHash(expectedHashBucket5_21, expectedHashBucket4_13)
+ expectedHash = testutil.ComputeCryptoHash(expectedHashBucket1_1, expectedHashBucket1_2)
+ testutil.AssertEquals(t, rootHash, expectedHash)
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+
+ ////////////// Test - overwrite an existing key /////////////////////
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Set("chaincodeID7", "key7", []byte("value7_new"), nil)
+ rootHash = stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(stateDelta)
+ expectedHashBucket5_2 = expectedBucketHashForTest(
+ []string{"chaincodeID2", "key2", "value2"},
+ []string{"chaincodeID7", "key7", "value7_new"},
+ )
+ expectedHashBucket4_1 = testutil.ComputeCryptoHash(expectedHashBucket5_1, expectedHashBucket5_2)
+ expectedHashBucket2_1 = testutil.ComputeCryptoHash(expectedHashBucket4_1, expectedHashBucket3_2)
+ expectedHashBucket1_1 = testutil.ComputeCryptoHash(expectedHashBucket2_1, expectedHashBucket5_10)
+ expectedHash = testutil.ComputeCryptoHash(expectedHashBucket1_1, expectedHashBucket1_2)
+ testutil.AssertEquals(t, rootHash, expectedHash)
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+ testutil.AssertEquals(t, stateImplTestWrapper.get("chaincodeID7", "key7"), []byte("value7_new"))
+
+ // ////////////// Test - delete an existing key /////////////////////
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Delete("chaincodeID2", "key2", nil)
+ rootHash = stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(stateDelta)
+ expectedHashBucket5_2 = expectedBucketHashForTest([]string{"chaincodeID7", "key7", "value7_new"})
+ expectedHashBucket4_1 = testutil.ComputeCryptoHash(expectedHashBucket5_1, expectedHashBucket5_2)
+ expectedHashBucket2_1 = testutil.ComputeCryptoHash(expectedHashBucket4_1, expectedHashBucket3_2)
+ expectedHashBucket1_1 = testutil.ComputeCryptoHash(expectedHashBucket2_1, expectedHashBucket5_10)
+ expectedHash = testutil.ComputeCryptoHash(expectedHashBucket1_1, expectedHashBucket1_2)
+ testutil.AssertEquals(t, rootHash, expectedHash)
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+ testutil.AssertNil(t, stateImplTestWrapper.get("chaincodeID2", "key2"))
+}
+
+func TestStateImpl_ComputeHash_DB_3(t *testing.T) {
+ // simple test... not using custom hasher
+ conf = newConfig(DefaultNumBuckets, DefaultMaxGroupingAtEachLevel, fnvHash)
+ testDBWrapper.CleanDB(t)
+ stateImplTestWrapper := newStateImplTestWrapper(t)
+ stateImpl := stateImplTestWrapper.stateImpl
+ stateDelta := statemgmt.NewStateDelta()
+ stateDelta.Set("chaincode1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincode2", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincode3", "key3", []byte("value3"), nil)
+ stateImpl.PrepareWorkingSet(stateDelta)
+ hash1 := stateImplTestWrapper.computeCryptoHash()
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Delete("chaincode1", "key1", nil)
+ stateDelta.Delete("chaincode2", "key2", nil)
+ stateDelta.Delete("chaincode3", "key3", nil)
+ stateImpl.PrepareWorkingSet(stateDelta)
+ hash2 := stateImplTestWrapper.computeCryptoHash()
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+ testutil.AssertNotEquals(t, hash1, hash2)
+ testutil.AssertNil(t, hash2)
+}
+
+func TestStateImpl_DB_Changes(t *testing.T) {
+ // number of buckets at each level 26,9,3,1
+ testHasher, stateImplTestWrapper, stateDelta := createFreshDBAndInitTestStateImplWithCustomHasher(t, 26, 3)
+ // populate hash function such that
+ // ["chaincodeID1", "key1"] is bucketized to bucket 1
+ testHasher.populate("chaincodeID1", "key1", 0)
+ testHasher.populate("chaincodeID1", "key2", 0)
+ testHasher.populate("chaincodeID2", "key1", 1)
+ testHasher.populate("chaincodeID2", "key3", 3)
+ testHasher.populate("chaincodeID10", "key10", 24)
+
+ // prepare stateDelta
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID1", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID2", "key1", []byte("value3"), nil)
+ stateDelta.Set("chaincodeID2", "key3", []byte("value4"), nil)
+
+ stateImplTestWrapper.prepareWorkingSetAndComputeCryptoHash(stateDelta)
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+
+ // Read state from DB
+ testutil.AssertEquals(t, stateImplTestWrapper.get("chaincodeID1", "key1"), []byte("value1"))
+ testutil.AssertEquals(t, stateImplTestWrapper.get("chaincodeID2", "key1"), []byte("value3"))
+
+ // fetch datanode from DB
+ dataNodeFromDB, _ := fetchDataNodeFromDB(newDataKey("chaincodeID2", "key1"))
+ testutil.AssertEquals(t, dataNodeFromDB, newDataNode(newDataKey("chaincodeID2", "key1"), []byte("value3")))
+
+ //fetch non-existing data node from DB
+ dataNodeFromDB, _ = fetchDataNodeFromDB(newDataKey("chaincodeID10", "key10"))
+ t.Logf("isNIL...[%t]", dataNodeFromDB == nil)
+ testutil.AssertNil(t, dataNodeFromDB)
+
+ // fetch all data nodes from db that belong to bucket 1 at lowest level
+ dataNodesFromDB, _ := fetchDataNodesFromDBFor(newBucketKeyAtLowestLevel(1))
+ testutil.AssertContainsAll(t, dataNodesFromDB,
+ dataNodes{newDataNode(newDataKey("chaincodeID1", "key1"), []byte("value1")),
+ newDataNode(newDataKey("chaincodeID1", "key2"), []byte("value2"))})
+
+ // fetch all data nodes from db that belong to bucket 2 at lowest level
+ dataNodesFromDB, _ = fetchDataNodesFromDBFor(newBucketKeyAtLowestLevel(2))
+ testutil.AssertContainsAll(t, dataNodesFromDB,
+ dataNodes{newDataNode(newDataKey("chaincodeID2", "key1"), []byte("value3"))})
+
+ // fetch first bucket at second level
+ bucketNodeFromDB, _ := fetchBucketNodeFromDB(newBucketKey(2, 1))
+ testutil.AssertEquals(t, bucketNodeFromDB.bucketKey, newBucketKey(2, 1))
+ //check childrenCryptoHash entries in the bucket node from DB
+ testutil.AssertEquals(t, bucketNodeFromDB.childrenCryptoHash[0],
+ expectedBucketHashForTest([]string{"chaincodeID1", "key1", "value1", "key2", "value2"}))
+
+ testutil.AssertEquals(t, bucketNodeFromDB.childrenCryptoHash[1],
+ expectedBucketHashForTest([]string{"chaincodeID2", "key1", "value3"}))
+
+ testutil.AssertNil(t, bucketNodeFromDB.childrenCryptoHash[2])
+
+ // third bucket at second level should be nil
+ bucketNodeFromDB, _ = fetchBucketNodeFromDB(newBucketKey(2, 3))
+ testutil.AssertNil(t, bucketNodeFromDB)
+}
+
+func TestStateImpl_DB_EmptyArrayValues(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ stateImplTestWrapper := newStateImplTestWrapper(t)
+ stateImpl := stateImplTestWrapper.stateImpl
+ stateDelta := statemgmt.NewStateDelta()
+ stateDelta.Set("chaincode1", "key1", []byte{}, nil)
+ stateImpl.PrepareWorkingSet(stateDelta)
+ stateImplTestWrapper.persistChangesAndResetInMemoryChanges()
+ emptyBytes := stateImplTestWrapper.get("chaincode1", "key1")
+ if emptyBytes == nil || len(emptyBytes) != 0 {
+ t.Fatalf("Expected an empty byte array. found = %#v", emptyBytes)
+ }
+ nilVal := stateImplTestWrapper.get("chaincodeID3", "non-existing-key")
+ if nilVal != nil {
+ t.Fatalf("Expected a nil. found = %#v", nilVal)
+ }
+}
diff --git a/core/ledger/statemgmt/buckettree/test.yaml b/core/ledger/statemgmt/buckettree/test.yaml
new file mode 100644
index 00000000000..aa3f08aedde
--- /dev/null
+++ b/core/ledger/statemgmt/buckettree/test.yaml
@@ -0,0 +1,15 @@
+###############################################################################
+#
+# Peer section
+#
+###############################################################################
+peer:
+ # Path on the file system where peer will store data
+ fileSystemPath: /var/hyperledger/test/ledger/statemgmt/buckettree/testdb
+ledger:
+ state:
+ dataStructure:
+ name: buckettree
+ configs:
+ numBuckets: 19
+ maxGroupingAtEachLevel: 3
diff --git a/core/ledger/statemgmt/commons.go b/core/ledger/statemgmt/commons.go
new file mode 100644
index 00000000000..102cd18656a
--- /dev/null
+++ b/core/ledger/statemgmt/commons.go
@@ -0,0 +1,48 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statemgmt
+
+import (
+ "bytes"
+
+ "github.com/op/go-logging"
+)
+
+var logger = logging.MustGetLogger("statemgmt")
+
+var stateKeyDelimiter = []byte{0x00}
+
+// ConstructCompositeKey returns a []byte that uniquely represents a given chaincodeID and key.
+// This assumes that chaincodeID does not contain a 0x00 byte, but the key may
+// TODO:enforce this restriction on chaincodeID or use length prefixing here instead of delimiter
+func ConstructCompositeKey(chaincodeID string, key string) []byte {
+ return bytes.Join([][]byte{[]byte(chaincodeID), []byte(key)}, stateKeyDelimiter)
+}
+
+// DecodeCompositeKey decodes the compositeKey constructed by ConstructCompositeKey method
+// back to the original chaincodeID and key form
+func DecodeCompositeKey(compositeKey []byte) (string, string) {
+ split := bytes.SplitN(compositeKey, stateKeyDelimiter, 2)
+ return string(split[0]), string(split[1])
+}
+
+// Copy returns a copy of given bytes
+func Copy(src []byte) []byte {
+ dest := make([]byte, len(src))
+ copy(dest, src)
+ return dest
+}
diff --git a/core/ledger/statemgmt/hashable_state.go b/core/ledger/statemgmt/hashable_state.go
new file mode 100644
index 00000000000..04bc6aa55d0
--- /dev/null
+++ b/core/ledger/statemgmt/hashable_state.go
@@ -0,0 +1,97 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statemgmt
+
+import (
+ "github.com/tecbot/gorocksdb"
+)
+
+// HashableState - Interface that is to be implemented by state management
+// Different state management implementations can be efficient for computing crypto-hash for
+// state under different workload conditions.
+type HashableState interface {
+
+ // Initialize this gives a chance to initialize. For instance, state implementation can load some data from DB
+ Initialize(configs map[string]interface{}) error
+
+ // Get get the value from DB
+ Get(chaincodeID string, key string) ([]byte, error)
+
+ // PrepareWorkingSet passes a stateDelta that captures the changes that needs to be applied to the state
+ PrepareWorkingSet(stateDelta *StateDelta) error
+
+ // ComputeCryptoHash state implementation to compute crypto-hash of state
+ // assuming the stateDelta (passed in PrepareWorkingSet method) is to be applied
+ ComputeCryptoHash() ([]byte, error)
+
+ // AddChangesForPersistence state implementation to add all the key-value pair that it needs
+ // to persist for committing the stateDelta (passed in PrepareWorkingSet method) to DB.
+ // In addition to the information in the StateDelta, the implementation may also want to
+ // persist intermediate results for faster crypto-hash computation
+ AddChangesForPersistence(writeBatch *gorocksdb.WriteBatch) error
+
+ // ClearWorkingSet state implementation may clear any data structures that it may have constructed
+ // for computing cryptoHash and persisting the changes for the stateDelta (passed in PrepareWorkingSet method)
+ ClearWorkingSet(changesPersisted bool)
+
+ // GetStateSnapshotIterator state implementation to provide an iterator that is supposed to give
+ // All the key-value of global state. A particular implementation may need to remove additional information
+ // that the implementation keeps for faster crypto-hash computation. For instance, filter a few of the
+ // key-values or remove some data from particular key-values.
+ GetStateSnapshotIterator(snapshot *gorocksdb.Snapshot) (StateSnapshotIterator, error)
+
+ // GetRangeScanIterator - state implementation to provide an iterator that is supposed to give
+ // All the key-values for a given chaincodeID such that a return key should be lexically greater than or
+ // equal to startKey and less than or equal to endKey. If the value for startKey parameter is an empty string
+ // startKey is assumed to be the smallest key available in the db for the chaincodeID. Similarly, an empty string
+ // for endKey parameter assumes the endKey to be the greatest key available in the db for the chaincodeID
+ GetRangeScanIterator(chaincodeID string, startKey string, endKey string) (RangeScanIterator, error)
+
+ // PerfHintKeyChanged state implementation may be provided with some hints before (e.g., during tx execution)
+ // the StateDelta is prepared and passed in PrepareWorkingSet method.
+ // A state implementation may use this hint for prefetching relevant data so as if this could improve
+ // the performance of ComputeCryptoHash method (when gets called at a later time)
+ PerfHintKeyChanged(chaincodeID string, key string)
+}
+
+// StateSnapshotIterator An interface that is to be implemented by the return value of
+// GetStateSnapshotIterator method in the implementation of HashableState interface
+type StateSnapshotIterator interface {
+
+ // Next moves to next key-value. Returns true if next key-value exists
+ Next() bool
+
+ // GetRawKeyValue returns next key-value
+ GetRawKeyValue() ([]byte, []byte)
+
+ // Close releases resources occupied by the iterator
+ Close()
+}
+
+// RangeScanIterator - is to be implemented by the return value of
+// GetRangeScanIterator method in the implementation of HashableState interface
+type RangeScanIterator interface {
+
+ // Next moves to next key-value. Returns true if next key-value exists
+ Next() bool
+
+ // GetKeyValue returns next key-value
+ GetKeyValue() (string, []byte)
+
+ // Close releases resources occupied by the iterator
+ Close()
+}
diff --git a/core/ledger/statemgmt/perf_test.go b/core/ledger/statemgmt/perf_test.go
new file mode 100644
index 00000000000..66cd67ebeed
--- /dev/null
+++ b/core/ledger/statemgmt/perf_test.go
@@ -0,0 +1,42 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statemgmt
+
+import (
+ "flag"
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+ "github.com/hyperledger/fabric/core/util"
+)
+
+func BenchmarkCryptoHash(b *testing.B) {
+ flags := flag.NewFlagSet("testParams", flag.ExitOnError)
+ numBytesPointer := flags.Int("NumBytes", -1, "Number of Bytes")
+ flags.Parse(testParams)
+ numBytes := *numBytesPointer
+ if numBytes == -1 {
+ b.Fatal("Missing value for parameter NumBytes")
+ }
+
+ randomBytes := testutil.ConstructRandomBytes(b, numBytes)
+ //b.Logf("byte size=%d, b.N=%d", len(randomBytes), b.N)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ util.ComputeCryptoHash(randomBytes)
+ }
+}
diff --git a/core/ledger/statemgmt/pkg_test.go b/core/ledger/statemgmt/pkg_test.go
new file mode 100644
index 00000000000..73d7c6a2300
--- /dev/null
+++ b/core/ledger/statemgmt/pkg_test.go
@@ -0,0 +1,31 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statemgmt
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+var testParams []string
+
+func TestMain(m *testing.M) {
+ testParams = testutil.ParseTestParams()
+ os.Exit(m.Run())
+}
diff --git a/core/ledger/statemgmt/raw/state_impl.go b/core/ledger/statemgmt/raw/state_impl.go
new file mode 100644
index 00000000000..622b8884479
--- /dev/null
+++ b/core/ledger/statemgmt/raw/state_impl.go
@@ -0,0 +1,98 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package raw
+
+import (
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/tecbot/gorocksdb"
+)
+
+// StateImpl implements raw state management. This implementation does not support computation of crypto-hash of the state.
+// It simply stores the compositeKey and value in the db
+type StateImpl struct {
+ stateDelta *statemgmt.StateDelta
+}
+
+// NewRawState constructs new instance of raw state
+func NewRawState() *StateImpl {
+ return &StateImpl{}
+}
+
+// Initialize - method implementation for interface 'statemgmt.HashableState'
+func (impl *StateImpl) Initialize(configs map[string]interface{}) error {
+ return nil
+}
+
+// Get - method implementation for interface 'statemgmt.HashableState'
+func (impl *StateImpl) Get(chaincodeID string, key string) ([]byte, error) {
+ compositeKey := statemgmt.ConstructCompositeKey(chaincodeID, key)
+ openchainDB := db.GetDBHandle()
+ return openchainDB.GetFromStateCF(compositeKey)
+}
+
+// PrepareWorkingSet - method implementation for interface 'statemgmt.HashableState'
+func (impl *StateImpl) PrepareWorkingSet(stateDelta *statemgmt.StateDelta) error {
+ impl.stateDelta = stateDelta
+ return nil
+}
+
+// ClearWorkingSet - method implementation for interface 'statemgmt.HashableState'
+func (impl *StateImpl) ClearWorkingSet(changesPersisted bool) {
+ impl.stateDelta = nil
+}
+
+// ComputeCryptoHash - method implementation for interface 'statemgmt.HashableState'
+func (impl *StateImpl) ComputeCryptoHash() ([]byte, error) {
+ return nil, nil
+}
+
+// AddChangesForPersistence - method implementation for interface 'statemgmt.HashableState'
+func (impl *StateImpl) AddChangesForPersistence(writeBatch *gorocksdb.WriteBatch) error {
+ delta := impl.stateDelta
+ if delta == nil {
+ return nil
+ }
+ openchainDB := db.GetDBHandle()
+ updatedChaincodeIds := delta.GetUpdatedChaincodeIds(false)
+ for _, updatedChaincodeID := range updatedChaincodeIds {
+ updates := delta.GetUpdates(updatedChaincodeID)
+ for updatedKey, value := range updates {
+ compositeKey := statemgmt.ConstructCompositeKey(updatedChaincodeID, updatedKey)
+ if value.IsDelete() {
+ writeBatch.DeleteCF(openchainDB.StateCF, compositeKey)
+ } else {
+ writeBatch.PutCF(openchainDB.StateCF, compositeKey, value.GetValue())
+ }
+ }
+ }
+ return nil
+}
+
+// PerfHintKeyChanged - method implementation for interface 'statemgmt.HashableState'
+func (impl *StateImpl) PerfHintKeyChanged(chaincodeID string, key string) {
+}
+
+// GetStateSnapshotIterator - method implementation for interface 'statemgmt.HashableState'
+func (impl *StateImpl) GetStateSnapshotIterator(snapshot *gorocksdb.Snapshot) (statemgmt.StateSnapshotIterator, error) {
+ panic("Not a full-fledged state implementation. Implemented only for measuring best-case performance benchmark")
+}
+
+// GetRangeScanIterator - method implementation for interface 'statemgmt.HashableState'
+func (impl *StateImpl) GetRangeScanIterator(chaincodeID string, startKey string, endKey string) (statemgmt.RangeScanIterator, error) {
+ panic("Not a full-fledged state implementation. Implemented only for measuring best-case performance benchmark")
+}
diff --git a/core/ledger/statemgmt/raw/state_impl_test.go b/core/ledger/statemgmt/raw/state_impl_test.go
new file mode 100644
index 00000000000..734c4c5d1d8
--- /dev/null
+++ b/core/ledger/statemgmt/raw/state_impl_test.go
@@ -0,0 +1,25 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package raw
+
+import "testing"
+
+func TestSkipAll(t *testing.T) {
+ t.Skip(`No tests in this package for now - This package is only experimental as yet.
+ The primary use of this package as yet is to measure what maximum performance we can get from a very basic state implementation
+ that doe not implement any hash computation function`)
+}
diff --git a/core/ledger/statemgmt/state/composite_range_scan_iterator.go b/core/ledger/statemgmt/state/composite_range_scan_iterator.go
new file mode 100644
index 00000000000..548aa6f71ba
--- /dev/null
+++ b/core/ledger/statemgmt/state/composite_range_scan_iterator.go
@@ -0,0 +1,90 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package state
+
+import (
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+)
+
+// CompositeRangeScanIterator - an implementation of interface 'statemgmt.RangeScanIterator'
+// This provides a wrapper on top of more than one underlying iterator
+type CompositeRangeScanIterator struct {
+ itrs []statemgmt.RangeScanIterator
+ currentItrNumber int
+}
+
+func newCompositeRangeScanIterator(
+ txDeltaItr *statemgmt.StateDeltaIterator,
+ batchDeltaItr *statemgmt.StateDeltaIterator,
+ implItr statemgmt.RangeScanIterator) statemgmt.RangeScanIterator {
+ itrs := make([]statemgmt.RangeScanIterator, 3)
+ itrs[0] = txDeltaItr
+ itrs[1] = batchDeltaItr
+ itrs[2] = implItr
+ return &CompositeRangeScanIterator{itrs, 0}
+}
+
+// Next - see interface 'statemgmt.RangeScanIterator' for details
+// The specific implementation below starts from first underlying iterator and
+// after exhausting the first underlying iterator, move to the second underlying iterator.
+// The implementation repeats this until last underlying iterator has been exhausted
+// In addition, a key-value from an underlying iterator is skipped if the key is found
+// in any of the preceding iterators
+func (itr *CompositeRangeScanIterator) Next() bool {
+ currentItrNumber := itr.currentItrNumber
+ currentItr := itr.itrs[currentItrNumber]
+ logger.Debugf("Operating on iterator number = %d", currentItrNumber)
+ keyAvailable := currentItr.Next()
+ for keyAvailable {
+ key, _ := currentItr.GetKeyValue()
+ logger.Debugf("Retrieved key = %s", key)
+ skipKey := false
+ for i := currentItrNumber - 1; i >= 0; i-- {
+ logger.Debugf("Evaluating key = %s in itr number = %d. currentItrNumber = %d", key, i, currentItrNumber)
+ previousItr := itr.itrs[i]
+ if previousItr.(*statemgmt.StateDeltaIterator).ContainsKey(key) {
+ skipKey = true
+ break
+ }
+ }
+ if skipKey {
+ logger.Debugf("Skipping key = %s", key)
+ keyAvailable = currentItr.Next()
+ continue
+ }
+ break
+ }
+
+ if keyAvailable || currentItrNumber == 2 {
+ logger.Debug("Returning for current key")
+ return keyAvailable
+ }
+
+ logger.Debug("Moving to next iterator")
+ itr.currentItrNumber++
+ return itr.Next()
+}
+
+// GetKeyValue - see interface 'statemgmt.RangeScanIterator' for details
+func (itr *CompositeRangeScanIterator) GetKeyValue() (string, []byte) {
+ return itr.itrs[itr.currentItrNumber].GetKeyValue()
+}
+
+// Close - see interface 'statemgmt.RangeScanIterator' for details
+func (itr *CompositeRangeScanIterator) Close() {
+ itr.itrs[2].Close()
+}
diff --git a/core/ledger/statemgmt/state/composite_range_scan_iterator_test.go b/core/ledger/statemgmt/state/composite_range_scan_iterator_test.go
new file mode 100644
index 00000000000..2ee11083b47
--- /dev/null
+++ b/core/ledger/statemgmt/state/composite_range_scan_iterator_test.go
@@ -0,0 +1,160 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package state
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+)
+
+func TestCompositeRangeScanIterator(t *testing.T) {
+ stateTestWrapper, state := createFreshDBAndConstructState(t)
+
+ // commit initial test state to db
+ state.TxBegin("txUuid")
+ state.Set("chaincode1", "key1", []byte("value1"))
+ state.Set("chaincode1", "key2", []byte("value2"))
+ state.Set("chaincode1", "key3", []byte("value3"))
+ state.Set("chaincode1", "key4", []byte("value4"))
+ state.Set("chaincode1", "key5", []byte("value5"))
+ state.Set("chaincode1", "key6", []byte("value6"))
+ state.Set("chaincode1", "key7", []byte("value7"))
+
+ state.Set("chaincode2", "key1", []byte("value1"))
+ state.Set("chaincode2", "key2", []byte("value2"))
+ state.Set("chaincode2", "key3", []byte("value3"))
+ state.TxFinish("txUuid", true)
+ stateTestWrapper.persistAndClearInMemoryChanges(0)
+
+ // change and delete a few existing keys and add a new key
+ state.TxBegin("txUUID")
+ state.Set("chaincode1", "key3", []byte("value3_new"))
+ state.Set("chaincode1", "key4", []byte("value4_new"))
+ state.Set("chaincode1", "key5", []byte("value5_new"))
+ state.Delete("chaincode1", "key6")
+ state.Set("chaincode1", "key8", []byte("value8_new"))
+ state.TxFinish("txUUID", true)
+
+ // change and delete a few existing keys and add a new key, in the on-going tx
+ state.TxBegin("txUUID")
+ state.Set("chaincode1", "key3", []byte("value3_new_new"))
+ state.Delete("chaincode1", "key4")
+ state.Set("chaincode1", "key9", []byte("value9_new_new"))
+
+ // Test with committed=false //////////////////////////
+ /////////////////////////////////////////////////////
+ // test keys between key2 and key8
+ itr, _ := state.GetRangeScanIterator("chaincode1", "key2", "key8", false)
+ statemgmt.AssertIteratorContains(t, itr,
+ map[string][]byte{
+ // from current tx
+ "key3": []byte("value3_new_new"),
+
+ // from current batch
+ "key5": []byte("value5_new"),
+ "key8": []byte("value8_new"),
+
+ // from committed results
+ "key2": []byte("value2"),
+ "key7": []byte("value7"),
+ })
+ itr.Close()
+
+ // test with an empty startKey
+ itr, _ = state.GetRangeScanIterator("chaincode1", "", "key8", false)
+ statemgmt.AssertIteratorContains(t, itr,
+ map[string][]byte{
+ // from current tx
+ "key3": []byte("value3_new_new"),
+
+ // from current batch
+ "key5": []byte("value5_new"),
+ "key8": []byte("value8_new"),
+
+ // from committed results
+ "key1": []byte("value1"),
+ "key2": []byte("value2"),
+ "key7": []byte("value7"),
+ })
+ itr.Close()
+
+ // test with an empty endKey
+ itr, _ = state.GetRangeScanIterator("chaincode1", "", "", false)
+ statemgmt.AssertIteratorContains(t, itr,
+ map[string][]byte{
+ // from current tx
+ "key3": []byte("value3_new_new"),
+ "key9": []byte("value9_new_new"),
+
+ // from current batch
+ "key5": []byte("value5_new"),
+ "key8": []byte("value8_new"),
+
+ // from committed results
+ "key1": []byte("value1"),
+ "key2": []byte("value2"),
+ "key7": []byte("value7"),
+ })
+ itr.Close()
+
+ // Test with committed=true //////////////////////////
+ /////////////////////////////////////////////////////
+ // test keys between key2 and key8
+ itr, _ = state.GetRangeScanIterator("chaincode1", "key2", "key8", true)
+ statemgmt.AssertIteratorContains(t, itr,
+ map[string][]byte{
+ // from committed results
+ "key2": []byte("value2"),
+ "key3": []byte("value3"),
+ "key4": []byte("value4"),
+ "key5": []byte("value5"),
+ "key6": []byte("value6"),
+ "key7": []byte("value7"),
+ })
+ itr.Close()
+
+ // test with an empty startKey
+ itr, _ = state.GetRangeScanIterator("chaincode1", "", "key8", true)
+ statemgmt.AssertIteratorContains(t, itr,
+ map[string][]byte{
+ // from committed results
+ "key1": []byte("value1"),
+ "key2": []byte("value2"),
+ "key3": []byte("value3"),
+ "key4": []byte("value4"),
+ "key5": []byte("value5"),
+ "key6": []byte("value6"),
+ "key7": []byte("value7"),
+ })
+ itr.Close()
+
+ // test with an empty endKey
+ itr, _ = state.GetRangeScanIterator("chaincode1", "", "", true)
+ statemgmt.AssertIteratorContains(t, itr,
+ map[string][]byte{
+ // from committed results
+ "key1": []byte("value1"),
+ "key2": []byte("value2"),
+ "key3": []byte("value3"),
+ "key4": []byte("value4"),
+ "key5": []byte("value5"),
+ "key6": []byte("value6"),
+ "key7": []byte("value7"),
+ })
+ itr.Close()
+}
diff --git a/core/ledger/statemgmt/state/config.go b/core/ledger/statemgmt/state/config.go
new file mode 100644
index 00000000000..acc920fb253
--- /dev/null
+++ b/core/ledger/statemgmt/state/config.go
@@ -0,0 +1,54 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package state
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/spf13/viper"
+)
+
+var loadConfigOnce sync.Once
+
+var stateImplName string
+var stateImplConfigs map[string]interface{}
+var deltaHistorySize int
+
+func initConfig() {
+ loadConfigOnce.Do(func() { loadConfig() })
+}
+
+func loadConfig() {
+ logger.Info("Loading configurations...")
+ stateImplName = viper.GetString("ledger.state.dataStructure.name")
+ stateImplConfigs = viper.GetStringMap("ledger.state.dataStructure.configs")
+ deltaHistorySize = viper.GetInt("ledger.state.deltaHistorySize")
+ logger.Infof("Configurations loaded. stateImplName=[%s], stateImplConfigs=%s, deltaHistorySize=[%d]",
+ stateImplName, stateImplConfigs, deltaHistorySize)
+
+ if len(stateImplName) == 0 {
+ stateImplName = detaultStateImpl
+ stateImplConfigs = nil
+ } else if stateImplName != "buckettree" && stateImplName != "trie" && stateImplName != "raw" {
+ panic(fmt.Errorf("Error during initialization of state implementation. State data structure '%s' is not valid.", stateImplName))
+ }
+
+ if deltaHistorySize < 0 {
+ panic(fmt.Errorf("Delta history size must be greater than or equal to 0. Current value is %d.", deltaHistorySize))
+ }
+}
diff --git a/core/ledger/statemgmt/state/pkg_test.go b/core/ledger/statemgmt/state/pkg_test.go
new file mode 100644
index 00000000000..a6cdfc9aa89
--- /dev/null
+++ b/core/ledger/statemgmt/state/pkg_test.go
@@ -0,0 +1,76 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package state
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+ "github.com/tecbot/gorocksdb"
+)
+
+var testDBWrapper = db.NewTestDBWrapper()
+
+func TestMain(m *testing.M) {
+ testutil.SetupTestConfig()
+ os.Exit(m.Run())
+}
+
+func createFreshDBAndConstructState(t *testing.T) (*stateTestWrapper, *State) {
+ testDBWrapper.CleanDB(t)
+ stateTestWrapper := newStateTestWrapper(t)
+ return stateTestWrapper, stateTestWrapper.state
+}
+
+type stateTestWrapper struct {
+ t *testing.T
+ state *State
+}
+
+func newStateTestWrapper(t *testing.T) *stateTestWrapper {
+ return &stateTestWrapper{t, NewState()}
+}
+
+func (testWrapper *stateTestWrapper) get(chaincodeID string, key string, committed bool) []byte {
+ value, err := testWrapper.state.Get(chaincodeID, key, committed)
+ testutil.AssertNoError(testWrapper.t, err, "Error while getting state")
+ return value
+}
+
+func (testWrapper *stateTestWrapper) getSnapshot() *StateSnapshot {
+ dbSnapshot := db.GetDBHandle().GetSnapshot()
+ stateSnapshot, err := testWrapper.state.GetSnapshot(0, dbSnapshot)
+ testutil.AssertNoError(testWrapper.t, err, "Error during creation of state snapshot")
+ return stateSnapshot
+}
+
+func (testWrapper *stateTestWrapper) persistAndClearInMemoryChanges(blockNumber uint64) {
+ writeBatch := gorocksdb.NewWriteBatch()
+ defer writeBatch.Destroy()
+ testWrapper.state.AddChangesForPersistence(blockNumber, writeBatch)
+ testDBWrapper.WriteToDB(testWrapper.t, writeBatch)
+ testWrapper.state.ClearInMemoryChanges(true)
+}
+
+func (testWrapper *stateTestWrapper) fetchStateDeltaFromDB(blockNumber uint64) *statemgmt.StateDelta {
+ delta := statemgmt.NewStateDelta()
+ delta.Unmarshal(testDBWrapper.GetFromStateDeltaCF(testWrapper.t, encodeStateDeltaKey(blockNumber)))
+ return delta
+}
diff --git a/core/ledger/statemgmt/state/state.go b/core/ledger/statemgmt/state/state.go
new file mode 100644
index 00000000000..dd7838ba96b
--- /dev/null
+++ b/core/ledger/statemgmt/state/state.go
@@ -0,0 +1,358 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package state
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt/buckettree"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt/raw"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt/trie"
+ "github.com/op/go-logging"
+ "github.com/tecbot/gorocksdb"
+)
+
+var logger = logging.MustGetLogger("state")
+
+const detaultStateImpl = "buckettree"
+
+var stateImpl statemgmt.HashableState
+
+// State structure for maintaining world state.
+// This encapsulates a particular implementation for managing the state persistence
+// This is not thread safe
+type State struct {
+ stateImpl statemgmt.HashableState
+ stateDelta *statemgmt.StateDelta
+ currentTxStateDelta *statemgmt.StateDelta
+ currentTxUUID string
+ txStateDeltaHash map[string][]byte
+ updateStateImpl bool
+ historyStateDeltaSize uint64
+}
+
+// NewState constructs a new State. This initializes the encapsulated state implementation
+func NewState() *State {
+ initConfig()
+ logger.Infof("Initializing state implementation [%s]", stateImplName)
+ switch stateImplName {
+ case "buckettree":
+ stateImpl = buckettree.NewStateImpl()
+ case "trie":
+ stateImpl = trie.NewStateTrie()
+ case "raw":
+ stateImpl = raw.NewRawState()
+ default:
+ panic("Should not reach here. Configs should have checked for the stateImplName being a valid names ")
+ }
+ err := stateImpl.Initialize(stateImplConfigs)
+ if err != nil {
+ panic(fmt.Errorf("Error during initialization of state implementation: %s", err))
+ }
+ return &State{stateImpl, statemgmt.NewStateDelta(), statemgmt.NewStateDelta(), "", make(map[string][]byte),
+ false, uint64(deltaHistorySize)}
+}
+
+// TxBegin marks begin of a new tx. If a tx is already in progress, this call panics
+func (state *State) TxBegin(txUUID string) {
+ logger.Debugf("txBegin() for txUuid [%s]", txUUID)
+ if state.txInProgress() {
+ panic(fmt.Errorf("A tx [%s] is already in progress. Received call for begin of another tx [%s]", state.currentTxUUID, txUUID))
+ }
+ state.currentTxUUID = txUUID
+}
+
+// TxFinish marks the completion of the on-going tx. If txUUID is not the same as that of the on-going tx, this call panics
+func (state *State) TxFinish(txUUID string, txSuccessful bool) {
+ logger.Debugf("txFinish() for txUuid [%s], txSuccessful=[%t]", txUUID, txSuccessful)
+ if state.currentTxUUID != txUUID {
+ panic(fmt.Errorf("Different Uuid in tx-begin [%s] and tx-finish [%s]", state.currentTxUUID, txUUID))
+ }
+ if txSuccessful {
+ if !state.currentTxStateDelta.IsEmpty() {
+ logger.Debugf("txFinish() for txUuid [%s] merging state changes", txUUID)
+ state.stateDelta.ApplyChanges(state.currentTxStateDelta)
+ state.txStateDeltaHash[txUUID] = state.currentTxStateDelta.ComputeCryptoHash()
+ state.updateStateImpl = true
+ } else {
+ state.txStateDeltaHash[txUUID] = nil
+ }
+ }
+ state.currentTxStateDelta = statemgmt.NewStateDelta()
+ state.currentTxUUID = ""
+}
+
+func (state *State) txInProgress() bool {
+ return state.currentTxUUID != ""
+}
+
+// Get returns state for chaincodeID and key. If committed is false, this first looks in memory and if missing,
+// pulls from db. If committed is true, this pulls from the db only.
+func (state *State) Get(chaincodeID string, key string, committed bool) ([]byte, error) {
+ if !committed {
+ valueHolder := state.currentTxStateDelta.Get(chaincodeID, key)
+ if valueHolder != nil {
+ return valueHolder.GetValue(), nil
+ }
+ valueHolder = state.stateDelta.Get(chaincodeID, key)
+ if valueHolder != nil {
+ return valueHolder.GetValue(), nil
+ }
+ }
+ return state.stateImpl.Get(chaincodeID, key)
+}
+
+// GetRangeScanIterator returns an iterator to get all the keys (and values) between startKey and endKey
+// (assuming lexical order of the keys) for a chaincodeID.
+func (state *State) GetRangeScanIterator(chaincodeID string, startKey string, endKey string, committed bool) (statemgmt.RangeScanIterator, error) {
+ stateImplItr, err := state.stateImpl.GetRangeScanIterator(chaincodeID, startKey, endKey)
+ if err != nil {
+ return nil, err
+ }
+
+ if committed {
+ return stateImplItr, nil
+ }
+ return newCompositeRangeScanIterator(
+ statemgmt.NewStateDeltaRangeScanIterator(state.currentTxStateDelta, chaincodeID, startKey, endKey),
+ statemgmt.NewStateDeltaRangeScanIterator(state.stateDelta, chaincodeID, startKey, endKey),
+ stateImplItr), nil
+}
+
+// Set sets state to the given value for chaincodeID and key. Does not immediately write to the DB
+func (state *State) Set(chaincodeID string, key string, value []byte) error {
+ logger.Debugf("set() chaincodeID=[%s], key=[%s], value=[%#v]", chaincodeID, key, value)
+ if !state.txInProgress() {
+ panic("State can be changed only in context of a tx.")
+ }
+
+ // Check if a previous value is already set in the state delta
+ if state.currentTxStateDelta.IsUpdatedValueSet(chaincodeID, key) {
+ // No need to bother looking up the previous value as we will not
+ // set it again. Just pass nil
+ state.currentTxStateDelta.Set(chaincodeID, key, value, nil)
+ } else {
+ // Need to lookup the previous value
+ previousValue, err := state.Get(chaincodeID, key, true)
+ if err != nil {
+ return err
+ }
+ state.currentTxStateDelta.Set(chaincodeID, key, value, previousValue)
+ }
+
+ return nil
+}
+
+// Delete tracks the deletion of state for chaincodeID and key. Does not immediately write to the DB
+func (state *State) Delete(chaincodeID string, key string) error {
+ logger.Debugf("delete() chaincodeID=[%s], key=[%s]", chaincodeID, key)
+ if !state.txInProgress() {
+ panic("State can be changed only in context of a tx.")
+ }
+
+ // Check if a previous value is already set in the state delta
+ if state.currentTxStateDelta.IsUpdatedValueSet(chaincodeID, key) {
+ // No need to bother looking up the previous value as we will not
+ // set it again. Just pass nil
+ state.currentTxStateDelta.Delete(chaincodeID, key, nil)
+ } else {
+ // Need to lookup the previous value
+ previousValue, err := state.Get(chaincodeID, key, true)
+ if err != nil {
+ return err
+ }
+ state.currentTxStateDelta.Delete(chaincodeID, key, previousValue)
+ }
+
+ return nil
+}
+
+// CopyState copies all the key-values from sourceChaincodeID to destChaincodeID
+func (state *State) CopyState(sourceChaincodeID string, destChaincodeID string) error {
+ itr, err := state.GetRangeScanIterator(sourceChaincodeID, "", "", true)
+ defer itr.Close()
+ if err != nil {
+ return err
+ }
+ for itr.Next() {
+ k, v := itr.GetKeyValue()
+ err := state.Set(destChaincodeID, k, v)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetMultipleKeys returns the values for the multiple keys.
+func (state *State) GetMultipleKeys(chaincodeID string, keys []string, committed bool) ([][]byte, error) {
+ var values [][]byte
+ for _, k := range keys {
+ v, err := state.Get(chaincodeID, k, committed)
+ if err != nil {
+ return nil, err
+ }
+ values = append(values, v)
+ }
+ return values, nil
+}
+
+// SetMultipleKeys sets the values for the multiple keys.
+func (state *State) SetMultipleKeys(chaincodeID string, kvs map[string][]byte) error {
+ for k, v := range kvs {
+ err := state.Set(chaincodeID, k, v)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetHash computes new state hash if the stateDelta is to be applied.
+// Recomputes only if stateDelta has changed after most recent call to this function
+func (state *State) GetHash() ([]byte, error) {
+ logger.Debug("Enter - GetHash()")
+ if state.updateStateImpl {
+ logger.Debug("updating stateImpl with working-set")
+ state.stateImpl.PrepareWorkingSet(state.stateDelta)
+ state.updateStateImpl = false
+ }
+ hash, err := state.stateImpl.ComputeCryptoHash()
+ if err != nil {
+ return nil, err
+ }
+ logger.Debug("Exit - GetHash()")
+ return hash, nil
+}
+
+// GetTxStateDeltaHash returns the hash of the StateDelta
+func (state *State) GetTxStateDeltaHash() map[string][]byte {
+ return state.txStateDeltaHash
+}
+
+// ClearInMemoryChanges removes from memory all the changes to state
+func (state *State) ClearInMemoryChanges(changesPersisted bool) {
+ state.stateDelta = statemgmt.NewStateDelta()
+ state.txStateDeltaHash = make(map[string][]byte)
+ state.stateImpl.ClearWorkingSet(changesPersisted)
+}
+
+// getStateDelta gets the changes in state after the most recent call to method clearInMemoryChanges
+func (state *State) getStateDelta() *statemgmt.StateDelta {
+ return state.stateDelta
+}
+
+// GetSnapshot returns a snapshot of the global state for the current block. stateSnapshot.Release()
+// must be called once you are done.
+func (state *State) GetSnapshot(blockNumber uint64, dbSnapshot *gorocksdb.Snapshot) (*StateSnapshot, error) {
+ return newStateSnapshot(blockNumber, dbSnapshot)
+}
+
+// FetchStateDeltaFromDB fetches the StateDelta corresponding to the given blockNumber
+func (state *State) FetchStateDeltaFromDB(blockNumber uint64) (*statemgmt.StateDelta, error) {
+ stateDeltaBytes, err := db.GetDBHandle().GetFromStateDeltaCF(encodeStateDeltaKey(blockNumber))
+ if err != nil {
+ return nil, err
+ }
+ if stateDeltaBytes == nil {
+ return nil, nil
+ }
+ stateDelta := statemgmt.NewStateDelta()
+ stateDelta.Unmarshal(stateDeltaBytes)
+ return stateDelta, nil
+}
+
+// AddChangesForPersistence adds key-value pairs to writeBatch
+func (state *State) AddChangesForPersistence(blockNumber uint64, writeBatch *gorocksdb.WriteBatch) {
+ logger.Debug("state.addChangesForPersistence()...start")
+ if state.updateStateImpl {
+ state.stateImpl.PrepareWorkingSet(state.stateDelta)
+ state.updateStateImpl = false
+ }
+ state.stateImpl.AddChangesForPersistence(writeBatch)
+
+ serializedStateDelta := state.stateDelta.Marshal()
+ cf := db.GetDBHandle().StateDeltaCF
+ logger.Debugf("Adding state-delta corresponding to block number[%d]", blockNumber)
+ writeBatch.PutCF(cf, encodeStateDeltaKey(blockNumber), serializedStateDelta)
+ if blockNumber >= state.historyStateDeltaSize {
+ blockNumberToDelete := blockNumber - state.historyStateDeltaSize
+ logger.Debugf("Deleting state-delta corresponding to block number[%d]", blockNumberToDelete)
+ writeBatch.DeleteCF(cf, encodeStateDeltaKey(blockNumberToDelete))
+ } else {
+ logger.Debugf("Not deleting previous state-delta. Block number [%d] is smaller than historyStateDeltaSize [%d]",
+ blockNumber, state.historyStateDeltaSize)
+ }
+ logger.Debug("state.addChangesForPersistence()...finished")
+}
+
+// ApplyStateDelta applies already prepared stateDelta to the existing state.
+// This is an in memory change only. state.CommitStateDelta must be used to
+// commit the state to the DB. This method is to be used in state transfer.
+func (state *State) ApplyStateDelta(delta *statemgmt.StateDelta) {
+ state.stateDelta = delta
+ state.updateStateImpl = true
+}
+
+// CommitStateDelta commits the changes from state.ApplyStateDelta to the
+// DB.
+func (state *State) CommitStateDelta() error {
+ if state.updateStateImpl {
+ state.stateImpl.PrepareWorkingSet(state.stateDelta)
+ state.updateStateImpl = false
+ }
+
+ writeBatch := gorocksdb.NewWriteBatch()
+ defer writeBatch.Destroy()
+ state.stateImpl.AddChangesForPersistence(writeBatch)
+ opt := gorocksdb.NewDefaultWriteOptions()
+ defer opt.Destroy()
+ return db.GetDBHandle().DB.Write(opt, writeBatch)
+}
+
+// DeleteState deletes ALL state keys/values from the DB. This is generally
+// only used during state synchronization when creating a new state from
+// a snapshot.
+func (state *State) DeleteState() error {
+ state.ClearInMemoryChanges(false)
+ err := db.GetDBHandle().DeleteState()
+ if err != nil {
+ logger.Errorf("Error deleting state: %s", err)
+ }
+ return err
+}
+
+func encodeStateDeltaKey(blockNumber uint64) []byte {
+ return encodeUint64(blockNumber)
+}
+
+func decodeStateDeltaKey(dbkey []byte) uint64 {
+ return decodeToUint64(dbkey)
+}
+
+func encodeUint64(number uint64) []byte {
+ bytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(bytes, number)
+ return bytes
+}
+
+func decodeToUint64(bytes []byte) uint64 {
+ return binary.BigEndian.Uint64(bytes)
+}
diff --git a/core/ledger/statemgmt/state/state_snapshot.go b/core/ledger/statemgmt/state/state_snapshot.go
new file mode 100644
index 00000000000..5a9839650b9
--- /dev/null
+++ b/core/ledger/statemgmt/state/state_snapshot.go
@@ -0,0 +1,60 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package state
+
+import (
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/tecbot/gorocksdb"
+)
+
+// StateSnapshot encapsulates StateSnapshotIterator given by actual state implementation and the db snapshot
+type StateSnapshot struct {
+ blockNumber uint64
+ stateImplItr statemgmt.StateSnapshotIterator
+ dbSnapshot *gorocksdb.Snapshot
+}
+
+// newStateSnapshot creates a new snapshot of the global state for the current block.
+func newStateSnapshot(blockNumber uint64, dbSnapshot *gorocksdb.Snapshot) (*StateSnapshot, error) {
+ itr, err := stateImpl.GetStateSnapshotIterator(dbSnapshot)
+ if err != nil {
+ return nil, err
+ }
+ snapshot := &StateSnapshot{blockNumber, itr, dbSnapshot}
+ return snapshot, nil
+}
+
+// Release the snapshot. This MUST be called when you are done with this resource.
+func (ss *StateSnapshot) Release() {
+ ss.stateImplItr.Close()
+ ss.dbSnapshot.Release()
+}
+
+// Next moves the iterator to the next key/value pair in the state
+func (ss *StateSnapshot) Next() bool {
+ return ss.stateImplItr.Next()
+}
+
+// GetRawKeyValue returns the raw bytes for the key and value at the current iterator position
+func (ss *StateSnapshot) GetRawKeyValue() ([]byte, []byte) {
+ return ss.stateImplItr.GetRawKeyValue()
+}
+
+// GetBlockNumber returns the blocknumber associated with this global state snapshot
+func (ss *StateSnapshot) GetBlockNumber() uint64 {
+ return ss.blockNumber
+}
diff --git a/core/ledger/statemgmt/state/state_snapshot_test.go b/core/ledger/statemgmt/state/state_snapshot_test.go
new file mode 100644
index 00000000000..46a3a90c490
--- /dev/null
+++ b/core/ledger/statemgmt/state/state_snapshot_test.go
@@ -0,0 +1,63 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package state
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+func TestStateSnapshot(t *testing.T) {
+ stateTestWrapper, state := createFreshDBAndConstructState(t)
+ // insert keys
+ state.TxBegin("txUuid")
+ state.Set("chaincodeID1", "key1", []byte("value1"))
+ state.Set("chaincodeID2", "key2", []byte("value2"))
+ state.Set("chaincodeID3", "key3", []byte("value3"))
+ state.Set("chaincodeID4", "key4", []byte("value4"))
+ state.Set("chaincodeID5", "key5", []byte("value5"))
+ state.Set("chaincodeID6", "key6", []byte("value6"))
+ state.TxFinish("txUuid", true)
+
+ stateTestWrapper.persistAndClearInMemoryChanges(0)
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincodeID5", "key5", true), []byte("value5"))
+
+	// take db snapshot
+ stateSnapshot := stateTestWrapper.getSnapshot()
+
+ // delete keys
+ state.TxBegin("txUuid")
+ state.Delete("chaincodeID1", "key1")
+ state.Delete("chaincodeID2", "key2")
+ state.Delete("chaincodeID3", "key3")
+ state.Delete("chaincodeID4", "key4")
+ state.Delete("chaincodeID5", "key5")
+ state.Delete("chaincodeID6", "key6")
+ state.TxFinish("txUuid", true)
+ stateTestWrapper.persistAndClearInMemoryChanges(0)
+ //check that the key is deleted
+ testutil.AssertNil(t, stateTestWrapper.get("chaincodeID5", "key5", true))
+
+ numKeys := 0
+ for stateSnapshot.Next() {
+ key, value := stateSnapshot.GetRawKeyValue()
+ t.Logf("key=[%s], value=[%s]", string(key), string(value))
+ numKeys++
+ }
+ testutil.AssertEquals(t, numKeys, 6)
+}
diff --git a/core/ledger/statemgmt/state/state_test.go b/core/ledger/statemgmt/state/state_test.go
new file mode 100644
index 00000000000..9c27eddedbe
--- /dev/null
+++ b/core/ledger/statemgmt/state/state_test.go
@@ -0,0 +1,171 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package state
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+func TestStateChanges(t *testing.T) {
+ stateTestWrapper, state := createFreshDBAndConstructState(t)
+ // add keys
+ state.TxBegin("txUuid")
+ state.Set("chaincode1", "key1", []byte("value1"))
+ state.Set("chaincode1", "key2", []byte("value2"))
+ state.TxFinish("txUuid", true)
+	//check in-memory
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincode1", "key1", false), []byte("value1"))
+ testutil.AssertNil(t, stateTestWrapper.get("chaincode1", "key1", true))
+
+ delta := state.getStateDelta()
+ // save to db
+ stateTestWrapper.persistAndClearInMemoryChanges(0)
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincode1", "key1", true), []byte("value1"))
+ testutil.AssertEquals(t, stateTestWrapper.fetchStateDeltaFromDB(0), delta)
+
+ // make changes when data is already in db
+ state.TxBegin("txUuid")
+ state.Set("chaincode1", "key1", []byte("new_value1"))
+ state.TxFinish("txUuid", true)
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincode1", "key1", false), []byte("new_value1"))
+
+ state.TxBegin("txUuid")
+ state.Delete("chaincode1", "key2")
+ state.TxFinish("txUuid", true)
+ testutil.AssertNil(t, stateTestWrapper.get("chaincode1", "key2", false))
+
+ state.TxBegin("txUuid")
+ state.Set("chaincode2", "key3", []byte("value3"))
+ state.Set("chaincode2", "key4", []byte("value4"))
+ state.TxFinish("txUuid", true)
+
+ delta = state.getStateDelta()
+ stateTestWrapper.persistAndClearInMemoryChanges(1)
+ testutil.AssertEquals(t, stateTestWrapper.fetchStateDeltaFromDB(1), delta)
+
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincode1", "key1", true), []byte("new_value1"))
+ testutil.AssertNil(t, stateTestWrapper.get("chaincode1", "key2", true))
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincode2", "key3", true), []byte("value3"))
+}
+
+func TestStateTxBehavior(t *testing.T) {
+ stateTestWrapper, state := createFreshDBAndConstructState(t)
+ if state.txInProgress() {
+ t.Fatalf("No tx should be reported to be in progress")
+ }
+
+ // set state in a successful tx
+ state.TxBegin("txUuid")
+ state.Set("chaincode1", "key1", []byte("value1"))
+ state.Set("chaincode2", "key2", []byte("value2"))
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincode1", "key1", false), []byte("value1"))
+ state.TxFinish("txUuid", true)
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincode1", "key1", false), []byte("value1"))
+
+ // set state in a failed tx
+ state.TxBegin("txUuid1")
+ state.Set("chaincode1", "key1", []byte("value1_new"))
+ state.Set("chaincode2", "key2", []byte("value2_new"))
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincode1", "key1", false), []byte("value1_new"))
+ state.TxFinish("txUuid1", false)
+ //older state should be available
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincode1", "key1", false), []byte("value1"))
+
+ // delete state in a successful tx
+ state.TxBegin("txUuid2")
+ state.Delete("chaincode1", "key1")
+ testutil.AssertNil(t, stateTestWrapper.get("chaincode1", "key1", false))
+ state.TxFinish("txUuid2", true)
+ testutil.AssertNil(t, stateTestWrapper.get("chaincode1", "key1", false))
+
+ // // delete state in a failed tx
+ state.TxBegin("txUuid2")
+ state.Delete("chaincode2", "key2")
+ testutil.AssertNil(t, stateTestWrapper.get("chaincode2", "key2", false))
+ state.TxFinish("txUuid2", false)
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincode2", "key2", false), []byte("value2"))
+}
+
+func TestStateTxWrongCallCausePanic_1(t *testing.T) {
+ _, state := createFreshDBAndConstructState(t)
+ defer testutil.AssertPanic(t, "A panic should occur when a set state is invoked with out calling a tx-begin")
+ state.Set("chaincodeID1", "key1", []byte("value1"))
+}
+
+func TestStateTxWrongCallCausePanic_2(t *testing.T) {
+ _, state := createFreshDBAndConstructState(t)
+ defer testutil.AssertPanic(t, "A panic should occur when a tx-begin is invoked before tx-finish for on-going tx")
+ state.TxBegin("txUuid")
+ state.TxBegin("anotherUuid")
+}
+
+func TestStateTxWrongCallCausePanic_3(t *testing.T) {
+ _, state := createFreshDBAndConstructState(t)
+ defer testutil.AssertPanic(t, "A panic should occur when Uuid for tx-begin and tx-finish ends")
+ state.TxBegin("txUuid")
+ state.TxFinish("anotherUuid", true)
+}
+
+func TestDeleteState(t *testing.T) {
+
+ stateTestWrapper, state := createFreshDBAndConstructState(t)
+
+ // Add keys
+ state.TxBegin("txUuid")
+ state.Set("chaincode1", "key1", []byte("value1"))
+ state.Set("chaincode1", "key2", []byte("value2"))
+ state.TxFinish("txUuid", true)
+ state.getStateDelta()
+ stateTestWrapper.persistAndClearInMemoryChanges(0)
+
+ // confirm keys are present
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincode1", "key1", true), []byte("value1"))
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincode1", "key2", true), []byte("value2"))
+
+ // Delete the State
+ err := state.DeleteState()
+ if err != nil {
+ t.Fatalf("Error deleting the state: %s", err)
+ }
+
+ // confirm the values are empty
+ testutil.AssertNil(t, stateTestWrapper.get("chaincode1", "key1", false))
+ testutil.AssertNil(t, stateTestWrapper.get("chaincode1", "key2", false))
+ testutil.AssertNil(t, stateTestWrapper.get("chaincode1", "key1", true))
+ testutil.AssertNil(t, stateTestWrapper.get("chaincode1", "key2", true))
+
+ // Confirm that we can now store new stuff in the state
+ state.TxBegin("txUuid")
+ state.Set("chaincode1", "key1", []byte("value1"))
+ state.Set("chaincode1", "key2", []byte("value2"))
+ state.TxFinish("txUuid", true)
+ state.getStateDelta()
+ stateTestWrapper.persistAndClearInMemoryChanges(1)
+
+ // confirm keys are present
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincode1", "key1", true), []byte("value1"))
+ testutil.AssertEquals(t, stateTestWrapper.get("chaincode1", "key2", true), []byte("value2"))
+}
+
+func TestStateDeltaSizeSetting(t *testing.T) {
+ _, state := createFreshDBAndConstructState(t)
+ if state.historyStateDeltaSize != 500 {
+ t.Fatalf("Error reading historyStateDeltaSize. Expected 500, but got %d", state.historyStateDeltaSize)
+ }
+}
diff --git a/core/ledger/statemgmt/state/test.yaml b/core/ledger/statemgmt/state/test.yaml
new file mode 100644
index 00000000000..a3d6accdef6
--- /dev/null
+++ b/core/ledger/statemgmt/state/test.yaml
@@ -0,0 +1,22 @@
+###############################################################################
+#
+# Peer section
+#
+###############################################################################
+peer:
+ # Path on the file system where peer will store data
+ fileSystemPath: /var/hyperledger/test/ledge/statemgmt/state/testdb
+
+ledger:
+
+ state:
+
+    # Control the number of state deltas that are maintained. This takes additional
+    # disk space, but allows the state to be rolled backwards and forwards
+ # without the need to replay transactions.
+ deltaHistorySize: 500
+ dataStructure:
+ name: buckettree
+ configs:
+ numBuckets: 10009
+ maxGroupingAtEachLevel: 10
diff --git a/core/ledger/statemgmt/state_delta.go b/core/ledger/statemgmt/state_delta.go
new file mode 100644
index 00000000000..9f42722117b
--- /dev/null
+++ b/core/ledger/statemgmt/state_delta.go
@@ -0,0 +1,366 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statemgmt
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/util"
+)
+
+// StateDelta holds the changes to existing state. This struct is used for holding the uncommitted changes during execution of a tx-batch
+// Also, to be used for transferring the state to another peer in chunks
+type StateDelta struct {
+ ChaincodeStateDeltas map[string]*ChaincodeStateDelta
+	// RollBackwards allows one to control whether this delta will roll the state
+ // forwards or backwards.
+ RollBackwards bool
+}
+
+// NewStateDelta constructs an empty StateDelta struct
+func NewStateDelta() *StateDelta {
+ return &StateDelta{make(map[string]*ChaincodeStateDelta), false}
+}
+
+// Get get the state from delta if exists
+func (stateDelta *StateDelta) Get(chaincodeID string, key string) *UpdatedValue {
+ // TODO Cache?
+ chaincodeStateDelta, ok := stateDelta.ChaincodeStateDeltas[chaincodeID]
+ if ok {
+ return chaincodeStateDelta.get(key)
+ }
+ return nil
+}
+
+// Set sets state value for a key
+func (stateDelta *StateDelta) Set(chaincodeID string, key string, value, previousValue []byte) {
+ chaincodeStateDelta := stateDelta.getOrCreateChaincodeStateDelta(chaincodeID)
+ chaincodeStateDelta.set(key, value, previousValue)
+ return
+}
+
+// Delete deletes a key from the state
+func (stateDelta *StateDelta) Delete(chaincodeID string, key string, previousValue []byte) {
+ chaincodeStateDelta := stateDelta.getOrCreateChaincodeStateDelta(chaincodeID)
+ chaincodeStateDelta.remove(key, previousValue)
+ return
+}
+
+// IsUpdatedValueSet returns true if an updated value is already set for
+// the given chaincode ID and key.
+func (stateDelta *StateDelta) IsUpdatedValueSet(chaincodeID, key string) bool {
+ chaincodeStateDelta, ok := stateDelta.ChaincodeStateDeltas[chaincodeID]
+ if !ok {
+ return false
+ }
+ if _, ok := chaincodeStateDelta.UpdatedKVs[key]; ok {
+ return true
+ }
+ return false
+}
+
+// ApplyChanges merges another delta - if a key is present in both, the value of the existing key is overwritten
+func (stateDelta *StateDelta) ApplyChanges(anotherStateDelta *StateDelta) {
+ for chaincodeID, chaincodeStateDelta := range anotherStateDelta.ChaincodeStateDeltas {
+ existingChaincodeStateDelta, existingChaincode := stateDelta.ChaincodeStateDeltas[chaincodeID]
+ for key, valueHolder := range chaincodeStateDelta.UpdatedKVs {
+ var previousValue []byte
+ if existingChaincode {
+ existingUpdateValue, existingUpdate := existingChaincodeStateDelta.UpdatedKVs[key]
+ if existingUpdate {
+ // The existing state delta already has an updated value for this key.
+ previousValue = existingUpdateValue.PreviousValue
+ } else {
+ // Use the previous value set in the new state delta
+ previousValue = valueHolder.PreviousValue
+ }
+ } else {
+ // Use the previous value set in the new state delta
+ previousValue = valueHolder.PreviousValue
+ }
+
+ if valueHolder.IsDelete() {
+ stateDelta.Delete(chaincodeID, key, previousValue)
+ } else {
+ stateDelta.Set(chaincodeID, key, valueHolder.Value, previousValue)
+ }
+ }
+ }
+}
+
+// IsEmpty checks whether StateDelta contains any data
+func (stateDelta *StateDelta) IsEmpty() bool {
+ return len(stateDelta.ChaincodeStateDeltas) == 0
+}
+
+// GetUpdatedChaincodeIds returns the chaincodeIDs that are present in the delta.
+// If sorted is true, the method returns chaincodeIDs in lexicographically sorted order
+func (stateDelta *StateDelta) GetUpdatedChaincodeIds(sorted bool) []string {
+ updatedChaincodeIds := make([]string, len(stateDelta.ChaincodeStateDeltas))
+ i := 0
+ for k := range stateDelta.ChaincodeStateDeltas {
+ updatedChaincodeIds[i] = k
+ i++
+ }
+ if sorted {
+ sort.Strings(updatedChaincodeIds)
+ }
+ return updatedChaincodeIds
+}
+
+// GetUpdates returns changes associated with given chaincodeId
+func (stateDelta *StateDelta) GetUpdates(chaincodeID string) map[string]*UpdatedValue {
+ chaincodeStateDelta := stateDelta.ChaincodeStateDeltas[chaincodeID]
+ if chaincodeStateDelta == nil {
+ return nil
+ }
+ return chaincodeStateDelta.UpdatedKVs
+}
+
+func (stateDelta *StateDelta) getOrCreateChaincodeStateDelta(chaincodeID string) *ChaincodeStateDelta {
+ chaincodeStateDelta, ok := stateDelta.ChaincodeStateDeltas[chaincodeID]
+ if !ok {
+ chaincodeStateDelta = newChaincodeStateDelta(chaincodeID)
+ stateDelta.ChaincodeStateDeltas[chaincodeID] = chaincodeStateDelta
+ }
+ return chaincodeStateDelta
+}
+
+// ComputeCryptoHash computes crypto-hash for the data held
+// returns nil if no data is present
+func (stateDelta *StateDelta) ComputeCryptoHash() []byte {
+ if stateDelta.IsEmpty() {
+ return nil
+ }
+ var buffer bytes.Buffer
+ sortedChaincodeIds := stateDelta.GetUpdatedChaincodeIds(true)
+ for _, chaincodeID := range sortedChaincodeIds {
+ buffer.WriteString(chaincodeID)
+ chaincodeStateDelta := stateDelta.ChaincodeStateDeltas[chaincodeID]
+ sortedKeys := chaincodeStateDelta.getSortedKeys()
+ for _, key := range sortedKeys {
+ buffer.WriteString(key)
+ updatedValue := chaincodeStateDelta.get(key)
+ if !updatedValue.IsDelete() {
+ buffer.Write(updatedValue.Value)
+ }
+ }
+ }
+ hashingContent := buffer.Bytes()
+ logger.Debugf("computing hash on %#v", hashingContent)
+ return util.ComputeCryptoHash(hashingContent)
+}
+
+//ChaincodeStateDelta maintains state for a chaincode
+type ChaincodeStateDelta struct {
+ ChaincodeID string
+ UpdatedKVs map[string]*UpdatedValue
+}
+
+func newChaincodeStateDelta(chaincodeID string) *ChaincodeStateDelta {
+ return &ChaincodeStateDelta{chaincodeID, make(map[string]*UpdatedValue)}
+}
+
+func (chaincodeStateDelta *ChaincodeStateDelta) get(key string) *UpdatedValue {
+ // TODO Cache?
+ return chaincodeStateDelta.UpdatedKVs[key]
+}
+
+func (chaincodeStateDelta *ChaincodeStateDelta) set(key string, updatedValue, previousValue []byte) {
+ updatedKV, ok := chaincodeStateDelta.UpdatedKVs[key]
+ if ok {
+ // Key already exists, just set the updated value
+ updatedKV.Value = updatedValue
+ } else {
+ // New key. Create a new entry in the map
+ chaincodeStateDelta.UpdatedKVs[key] = &UpdatedValue{updatedValue, previousValue}
+ }
+}
+
+func (chaincodeStateDelta *ChaincodeStateDelta) remove(key string, previousValue []byte) {
+ updatedKV, ok := chaincodeStateDelta.UpdatedKVs[key]
+ if ok {
+ // Key already exists, just set the previous value
+ updatedKV.Value = nil
+ } else {
+ // New key. Create a new entry in the map
+ chaincodeStateDelta.UpdatedKVs[key] = &UpdatedValue{nil, previousValue}
+ }
+}
+
+func (chaincodeStateDelta *ChaincodeStateDelta) hasChanges() bool {
+ return len(chaincodeStateDelta.UpdatedKVs) > 0
+}
+
+func (chaincodeStateDelta *ChaincodeStateDelta) getSortedKeys() []string {
+ updatedKeys := []string{}
+ for k := range chaincodeStateDelta.UpdatedKVs {
+ updatedKeys = append(updatedKeys, k)
+ }
+ sort.Strings(updatedKeys)
+ logger.Debugf("Sorted keys = %#v", updatedKeys)
+ return updatedKeys
+}
+
+// UpdatedValue holds the value for a key
+type UpdatedValue struct {
+ Value []byte
+ PreviousValue []byte
+}
+
+// IsDelete checks whether the key was deleted
+func (updatedValue *UpdatedValue) IsDelete() bool {
+ return updatedValue.Value == nil
+}
+
+// GetValue returns the value
+func (updatedValue *UpdatedValue) GetValue() []byte {
+ return updatedValue.Value
+}
+
+// GetPreviousValue returns the previous value
+func (updatedValue *UpdatedValue) GetPreviousValue() []byte {
+ return updatedValue.PreviousValue
+}
+
+// marshalling / Unmarshalling code
+// We need to revisit the following when we define proto messages
+// for state related structures for transporting. Maybe we can
+// completely get rid of custom marshalling / Unmarshalling of a state delta
+
+// Marshal serializes the StateDelta
+func (stateDelta *StateDelta) Marshal() (b []byte) {
+ buffer := proto.NewBuffer([]byte{})
+ err := buffer.EncodeVarint(uint64(len(stateDelta.ChaincodeStateDeltas)))
+ if err != nil {
+ // in protobuf code the error return is always nil
+ panic(fmt.Errorf("This error should not occure: %s", err))
+ }
+ for chaincodeID, chaincodeStateDelta := range stateDelta.ChaincodeStateDeltas {
+ buffer.EncodeStringBytes(chaincodeID)
+ chaincodeStateDelta.marshal(buffer)
+ }
+ b = buffer.Bytes()
+ return
+}
+
+func (chaincodeStateDelta *ChaincodeStateDelta) marshal(buffer *proto.Buffer) {
+ err := buffer.EncodeVarint(uint64(len(chaincodeStateDelta.UpdatedKVs)))
+ if err != nil {
+ panic(fmt.Errorf("This error should not occur: %s", err))
+ }
+ for key, valueHolder := range chaincodeStateDelta.UpdatedKVs {
+ err = buffer.EncodeStringBytes(key)
+ if err != nil {
+ panic(fmt.Errorf("This error should not occur: %s", err))
+ }
+ chaincodeStateDelta.marshalValueWithMarker(buffer, valueHolder.Value)
+ chaincodeStateDelta.marshalValueWithMarker(buffer, valueHolder.PreviousValue)
+ }
+ return
+}
+
+func (chaincodeStateDelta *ChaincodeStateDelta) marshalValueWithMarker(buffer *proto.Buffer, value []byte) {
+ if value == nil {
+ // Just add a marker that the value is nil
+ err := buffer.EncodeVarint(uint64(0))
+ if err != nil {
+ panic(fmt.Errorf("This error should not occur: %s", err))
+ }
+ return
+ }
+ err := buffer.EncodeVarint(uint64(1))
+ if err != nil {
+ panic(fmt.Errorf("This error should not occur: %s", err))
+ }
+	// If the value happens to be an empty byte array, it would appear as a nil during
+ // deserialization - see method 'unmarshalValueWithMarker'
+ err = buffer.EncodeRawBytes(value)
+ if err != nil {
+ panic(fmt.Errorf("This error should not occur: %s", err))
+ }
+}
+
+// Unmarshal deserializes StateDelta
+func (stateDelta *StateDelta) Unmarshal(bytes []byte) error {
+ buffer := proto.NewBuffer(bytes)
+ size, err := buffer.DecodeVarint()
+ if err != nil {
+ return fmt.Errorf("Error unmarashaling size: %s", err)
+ }
+ stateDelta.ChaincodeStateDeltas = make(map[string]*ChaincodeStateDelta, size)
+ for i := uint64(0); i < size; i++ {
+ chaincodeID, err := buffer.DecodeStringBytes()
+ if err != nil {
+ return fmt.Errorf("Error unmarshaling chaincodeID : %s", err)
+ }
+ chaincodeStateDelta := newChaincodeStateDelta(chaincodeID)
+ err = chaincodeStateDelta.unmarshal(buffer)
+ if err != nil {
+ return fmt.Errorf("Error unmarshalling chaincodeStateDelta : %s", err)
+ }
+ stateDelta.ChaincodeStateDeltas[chaincodeID] = chaincodeStateDelta
+ }
+
+ return nil
+}
+
+func (chaincodeStateDelta *ChaincodeStateDelta) unmarshal(buffer *proto.Buffer) error {
+ size, err := buffer.DecodeVarint()
+ if err != nil {
+ return fmt.Errorf("Error unmarshaling state delta: %s", err)
+ }
+ chaincodeStateDelta.UpdatedKVs = make(map[string]*UpdatedValue, size)
+ for i := uint64(0); i < size; i++ {
+ key, err := buffer.DecodeStringBytes()
+ if err != nil {
+ return fmt.Errorf("Error unmarshaling state delta : %s", err)
+ }
+ value, err := chaincodeStateDelta.unmarshalValueWithMarker(buffer)
+ if err != nil {
+ return fmt.Errorf("Error unmarshaling state delta : %s", err)
+ }
+ previousValue, err := chaincodeStateDelta.unmarshalValueWithMarker(buffer)
+ if err != nil {
+ return fmt.Errorf("Error unmarshaling state delta : %s", err)
+ }
+ chaincodeStateDelta.UpdatedKVs[key] = &UpdatedValue{value, previousValue}
+ }
+ return nil
+}
+
+func (chaincodeStateDelta *ChaincodeStateDelta) unmarshalValueWithMarker(buffer *proto.Buffer) ([]byte, error) {
+ valueMarker, err := buffer.DecodeVarint()
+ if err != nil {
+ return nil, fmt.Errorf("Error unmarshaling state delta : %s", err)
+ }
+ if valueMarker == 0 {
+ return nil, nil
+ }
+ value, err := buffer.DecodeRawBytes(false)
+ if err != nil {
+ return nil, fmt.Errorf("Error unmarhsaling state delta : %s", err)
+ }
+	// protobuf makes an empty []byte into a nil. So, assigning an empty byte array explicitly
+ if value == nil {
+ value = []byte{}
+ }
+ return value, nil
+}
diff --git a/core/ledger/statemgmt/state_delta_iterator.go b/core/ledger/statemgmt/state_delta_iterator.go
new file mode 100644
index 00000000000..eb9ca645017
--- /dev/null
+++ b/core/ledger/statemgmt/state_delta_iterator.go
@@ -0,0 +1,75 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statemgmt
+
+// StateDeltaIterator - An iterator implementation over state-delta
+type StateDeltaIterator struct {
+ updates map[string]*UpdatedValue
+ relevantKeys []string
+ currentKeyIndex int
+ done bool
+}
+
+// NewStateDeltaRangeScanIterator - return an iterator for performing a range scan over a state-delta object
+func NewStateDeltaRangeScanIterator(delta *StateDelta, chaincodeID string, startKey string, endKey string) *StateDeltaIterator {
+ updates := delta.GetUpdates(chaincodeID)
+ return &StateDeltaIterator{updates, retrieveRelevantKeys(updates, startKey, endKey), -1, false}
+}
+
+func retrieveRelevantKeys(updates map[string]*UpdatedValue, startKey string, endKey string) []string {
+ relevantKeys := []string{}
+ if updates == nil {
+ return relevantKeys
+ }
+ for k, v := range updates {
+ if k >= startKey && (endKey == "" || k <= endKey) && !v.IsDelete() {
+ relevantKeys = append(relevantKeys, k)
+ }
+ }
+ return relevantKeys
+}
+
+// Next - see interface 'RangeScanIterator' for details
+func (itr *StateDeltaIterator) Next() bool {
+ itr.currentKeyIndex++
+ if itr.currentKeyIndex < len(itr.relevantKeys) {
+ return true
+ }
+ itr.currentKeyIndex--
+ itr.done = true
+ return false
+}
+
+// GetKeyValue - see interface 'RangeScanIterator' for details
+func (itr *StateDeltaIterator) GetKeyValue() (string, []byte) {
+ if itr.done {
+ logger.Warning("Iterator used after it has been exhausted. Last retrieved value will be returned")
+ }
+ key := itr.relevantKeys[itr.currentKeyIndex]
+ value := itr.updates[key].GetValue()
+ return key, value
+}
+
+// Close - see interface 'RangeScanIterator' for details
+func (itr *StateDeltaIterator) Close() {
+}
+
+// ContainsKey - checks whether the given key is present in the state-delta
+func (itr *StateDeltaIterator) ContainsKey(key string) bool {
+ _, ok := itr.updates[key]
+ return ok
+}
diff --git a/core/ledger/statemgmt/state_delta_iterator_test.go b/core/ledger/statemgmt/state_delta_iterator_test.go
new file mode 100644
index 00000000000..6219c99cb5e
--- /dev/null
+++ b/core/ledger/statemgmt/state_delta_iterator_test.go
@@ -0,0 +1,65 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statemgmt
+
+import (
+ "testing"
+)
+
+func TestStateDeltaIteratorTest(t *testing.T) {
+ delta := NewStateDelta()
+ delta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+ delta.Set("chaincodeID1", "key2", []byte("value2"), nil)
+ delta.Set("chaincodeID1", "key3", []byte("value3"), nil)
+ delta.Set("chaincodeID1", "key4", []byte("value4"), nil)
+ delta.Set("chaincodeID1", "key5", []byte("value5"), nil)
+ delta.Set("chaincodeID1", "key6", []byte("value6"), nil)
+ delta.Delete("chaincodeID1", "key3", nil)
+
+ delta.Set("chaincodeID2", "key7", []byte("value7"), nil)
+ delta.Set("chaincodeID2", "key8", []byte("value8"), nil)
+
+ // test a range
+ itr := NewStateDeltaRangeScanIterator(delta, "chaincodeID1", "key2", "key5")
+ AssertIteratorContains(t, itr,
+ map[string][]byte{
+ "key2": []byte("value2"),
+ "key4": []byte("value4"),
+ "key5": []byte("value5"),
+ })
+
+ // test with empty start key
+ itr = NewStateDeltaRangeScanIterator(delta, "chaincodeID1", "", "key5")
+ AssertIteratorContains(t, itr,
+ map[string][]byte{
+ "key1": []byte("value1"),
+ "key2": []byte("value2"),
+ "key4": []byte("value4"),
+ "key5": []byte("value5"),
+ })
+
+ // test with empty end key
+ itr = NewStateDeltaRangeScanIterator(delta, "chaincodeID1", "", "")
+ AssertIteratorContains(t, itr,
+ map[string][]byte{
+ "key1": []byte("value1"),
+ "key2": []byte("value2"),
+ "key4": []byte("value4"),
+ "key5": []byte("value5"),
+ "key6": []byte("value6"),
+ })
+}
diff --git a/core/ledger/statemgmt/state_delta_test.go b/core/ledger/statemgmt/state_delta_test.go
new file mode 100644
index 00000000000..1213b728396
--- /dev/null
+++ b/core/ledger/statemgmt/state_delta_test.go
@@ -0,0 +1,78 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statemgmt
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+func TestStateDeltaMarshalling(t *testing.T) {
+ stateDelta := NewStateDelta()
+ stateDelta.Set("chaincode1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincode2", "key2", []byte("value2"), nil)
+ stateDelta.Delete("chaincode3", "key3", nil)
+
+ by := stateDelta.Marshal()
+ t.Logf("length of marshalled bytes = [%d]", len(by))
+ stateDelta1 := NewStateDelta()
+ stateDelta1.Unmarshal(by)
+
+ testutil.AssertEquals(t, stateDelta1, stateDelta)
+}
+
+func TestStateDeltaCryptoHash(t *testing.T) {
+ stateDelta := NewStateDelta()
+
+ testutil.AssertNil(t, stateDelta.ComputeCryptoHash())
+
+ stateDelta.Set("chaincodeID1", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID2", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID2", "key1", []byte("value1"), nil)
+ testutil.AssertEquals(t, stateDelta.ComputeCryptoHash(), testutil.ComputeCryptoHash([]byte("chaincodeID1key1value1key2value2chaincodeID2key1value1key2value2")))
+
+ stateDelta.Delete("chaincodeID2", "key1", nil)
+ testutil.AssertEquals(t, stateDelta.ComputeCryptoHash(), testutil.ComputeCryptoHash([]byte("chaincodeID1key1value1key2value2chaincodeID2key1key2value2")))
+}
+
+func TestStateDeltaEmptyArrayValue(t *testing.T) {
+ stateDelta := NewStateDelta()
+ stateDelta.Set("chaincode1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincode2", "key2", []byte{}, nil)
+ stateDelta.Set("chaincode3", "key3", nil, nil)
+ stateDelta.Set("chaincode4", "", []byte("value4"), nil)
+
+ by := stateDelta.Marshal()
+ t.Logf("length of marshalled bytes = [%d]", len(by))
+ stateDelta1 := NewStateDelta()
+ stateDelta1.Unmarshal(by)
+
+ v := stateDelta1.Get("chaincode2", "key2")
+ if v.GetValue() == nil || len(v.GetValue()) > 0 {
+ t.Fatalf("An empty array expected. found = %#v", v)
+ }
+
+ v = stateDelta1.Get("chaincode3", "key3")
+ if v.GetValue() != nil {
+ t.Fatalf("Nil value expected. found = %#v", v)
+ }
+
+ v = stateDelta1.Get("chaincode4", "")
+ testutil.AssertEquals(t, v.GetValue(), []byte("value4"))
+}
diff --git a/core/ledger/statemgmt/test_exports.go b/core/ledger/statemgmt/test_exports.go
new file mode 100644
index 00000000000..46951ebcdc3
--- /dev/null
+++ b/core/ledger/statemgmt/test_exports.go
@@ -0,0 +1,76 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statemgmt
+
+import (
+ "fmt"
+ "math/rand"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+// AssertIteratorContains - tests whether the iterator (itr) contains expected results (provided in map)
+func AssertIteratorContains(t *testing.T, itr RangeScanIterator, expected map[string][]byte) {
+ count := 0
+ actual := make(map[string][]byte)
+ for itr.Next() {
+ count++
+ k, v := itr.GetKeyValue()
+ actual[k] = v
+ }
+
+ t.Logf("Results from iterator: %s", actual)
+ testutil.AssertEquals(t, count, len(expected))
+ for k, v := range expected {
+ testutil.AssertEquals(t, actual[k], v)
+ }
+}
+
+// ConstructRandomStateDelta creates a random state delta for testing
+func ConstructRandomStateDelta(
+ t testing.TB,
+ chaincodeIDPrefix string,
+ numChaincodes int,
+ maxKeySuffix int,
+ numKeysToInsert int,
+ kvSize int) *StateDelta {
+ delta := NewStateDelta()
+ s2 := rand.NewSource(time.Now().UnixNano())
+ r2 := rand.New(s2)
+
+ for i := 0; i < numKeysToInsert; i++ {
+ chaincodeID := chaincodeIDPrefix + "_" + strconv.Itoa(r2.Intn(numChaincodes))
+ key := "key_" + strconv.Itoa(r2.Intn(maxKeySuffix))
+ valueSize := kvSize - len(key)
+ if valueSize < 1 {
+ panic(fmt.Errorf("valueSize cannot be less than one. ValueSize=%d", valueSize))
+ }
+ value := testutil.ConstructRandomBytes(t, valueSize)
+ delta.Set(chaincodeID, key, value, nil)
+ }
+
+ for _, chaincodeDelta := range delta.ChaincodeStateDeltas {
+ sortedKeys := chaincodeDelta.getSortedKeys()
+ smallestKey := sortedKeys[0]
+ largestKey := sortedKeys[len(sortedKeys)-1]
+ t.Logf("chaincode=%s, numKeys=%d, smallestKey=%s, largestKey=%s", chaincodeDelta.ChaincodeID, len(sortedKeys), smallestKey, largestKey)
+ }
+ return delta
+}
diff --git a/core/ledger/statemgmt/trie/byteTrieKey.go b/core/ledger/statemgmt/trie/byteTrieKey.go
new file mode 100644
index 00000000000..e004a34aa7d
--- /dev/null
+++ b/core/ledger/statemgmt/trie/byteTrieKey.go
@@ -0,0 +1,86 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trie
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+)
+
+var numBytesAtEachLevel = 1
+
+type byteTrieKeyEncoder struct {
+}
+
+func newByteTrieKeyEncoder() trieKeyEncoder {
+ return &byteTrieKeyEncoder{}
+}
+
+func (encoder *byteTrieKeyEncoder) newTrieKey(originalBytes []byte) trieKeyInterface {
+ len := len(originalBytes)
+ remainingBytes := len % numBytesAtEachLevel
+ bytesToAppend := 0
+ if remainingBytes != 0 {
+ bytesToAppend = numBytesAtEachLevel - remainingBytes
+ }
+ for i := 0; i < bytesToAppend; i++ {
+ originalBytes = append(originalBytes, byte(0))
+ }
+ return byteTrieKey(originalBytes)
+}
+
+func (encoder *byteTrieKeyEncoder) decodeTrieKeyBytes(encodedBytes []byte) []byte {
+ return encodedBytes
+}
+
+func (encoder *byteTrieKeyEncoder) getMaxTrieWidth() int {
+ return int(math.Pow(2, float64(8*numBytesAtEachLevel)))
+}
+
+type byteTrieKey string
+
+func (key byteTrieKey) getLevel() int {
+ return len(key) / numBytesAtEachLevel
+}
+
+func (key byteTrieKey) getParentTrieKey() trieKeyInterface {
+ if key.isRootKey() {
+		panic(fmt.Errorf("Parent for Trie root should not be asked for"))
+ }
+ return key[:len(key)-numBytesAtEachLevel]
+}
+
+func (key byteTrieKey) getIndexInParent() int {
+ if key.isRootKey() {
+ panic(fmt.Errorf("Parent for Trie root should not be asked for"))
+ }
+ indexBytes := []byte{}
+ for i := 0; i < 8-numBytesAtEachLevel; i++ {
+ indexBytes = append(indexBytes, byte(0))
+ }
+ indexBytes = append(indexBytes, []byte(key[len(key)-numBytesAtEachLevel:])...)
+ return int(binary.BigEndian.Uint64(indexBytes))
+}
+
+func (key byteTrieKey) getEncodedBytes() []byte {
+ return []byte(key)
+}
+
+func (key byteTrieKey) isRootKey() bool {
+ return len(key) == 0
+}
diff --git a/core/ledger/statemgmt/trie/hexTrieKey.go b/core/ledger/statemgmt/trie/hexTrieKey.go
new file mode 100644
index 00000000000..af7d2760421
--- /dev/null
+++ b/core/ledger/statemgmt/trie/hexTrieKey.go
@@ -0,0 +1,92 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trie
+
+import (
+ "encoding/hex"
+ "fmt"
+)
+
+var charIndexMap = map[hexTrieKey]int{
+ "0": 0,
+ "1": 1,
+ "2": 2,
+ "3": 3,
+ "4": 4,
+ "5": 5,
+ "6": 6,
+ "7": 7,
+ "8": 8,
+ "9": 9,
+ "a": 10,
+ "b": 11,
+ "c": 12,
+ "d": 13,
+ "e": 14,
+ "f": 15,
+}
+
+type hexTrieKeyEncoder struct {
+}
+
+func newHexTrieKeyEncoder() trieKeyEncoder {
+ return &hexTrieKeyEncoder{}
+}
+
+func (encoder *hexTrieKeyEncoder) newTrieKey(originalBytes []byte) trieKeyInterface {
+ return hexTrieKey(hex.EncodeToString(originalBytes))
+}
+
+func (encoder *hexTrieKeyEncoder) decodeTrieKeyBytes(encodedBytes []byte) []byte {
+ originalBytes, err := hex.DecodeString(string(encodedBytes))
+ if err != nil {
+ panic(fmt.Errorf("Invalid input: input bytes=[%x], error:%s", encodedBytes, err))
+ }
+ return originalBytes
+}
+
+func (encoder *hexTrieKeyEncoder) getMaxTrieWidth() int {
+ return len(charIndexMap)
+}
+
+type hexTrieKey string
+
+func (key hexTrieKey) getLevel() int {
+ return len(key)
+}
+
+func (key hexTrieKey) getParentTrieKey() trieKeyInterface {
+ if key.isRootKey() {
+		panic(fmt.Errorf("Parent for Trie root should not be asked for"))
+ }
+ return key[:len(key)-1]
+}
+
+func (key hexTrieKey) getIndexInParent() int {
+ if key.isRootKey() {
+		panic(fmt.Errorf("Parent for Trie root should not be asked for"))
+ }
+ return charIndexMap[key[len(key)-1:]]
+}
+
+func (key hexTrieKey) getEncodedBytes() []byte {
+ return []byte(key)
+}
+
+func (key hexTrieKey) isRootKey() bool {
+ return len(key) == 0
+}
diff --git a/core/ledger/statemgmt/trie/pkg_test.go b/core/ledger/statemgmt/trie/pkg_test.go
new file mode 100644
index 00000000000..5f29b59b66f
--- /dev/null
+++ b/core/ledger/statemgmt/trie/pkg_test.go
@@ -0,0 +1,104 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trie
+
+import (
+ "os"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+ "github.com/hyperledger/fabric/core/util"
+ "github.com/tecbot/gorocksdb"
+)
+
+var testDBWrapper = db.NewTestDBWrapper()
+
+type stateTrieTestWrapper struct {
+ stateTrie *StateTrie
+ t *testing.T
+}
+
+func newStateTrieTestWrapper(t *testing.T) *stateTrieTestWrapper {
+ return &stateTrieTestWrapper{NewStateTrie(), t}
+}
+
+func (stateTrieTestWrapper *stateTrieTestWrapper) Get(chaincodeID string, key string) []byte {
+ value, err := stateTrieTestWrapper.stateTrie.Get(chaincodeID, key)
+ testutil.AssertNoError(stateTrieTestWrapper.t, err, "Error while getting value")
+ stateTrieTestWrapper.t.Logf("state value for chaincodeID,key=[%s,%s] = [%s], ", chaincodeID, key, string(value))
+ return value
+}
+
+func (stateTrieTestWrapper *stateTrieTestWrapper) PrepareWorkingSetAndComputeCryptoHash(stateDelta *statemgmt.StateDelta) []byte {
+ stateTrieTestWrapper.stateTrie.PrepareWorkingSet(stateDelta)
+ cryptoHash, err := stateTrieTestWrapper.stateTrie.ComputeCryptoHash()
+ testutil.AssertNoError(stateTrieTestWrapper.t, err, "Error while computing crypto hash")
+ stateTrieTestWrapper.t.Logf("Cryptohash = [%x]", cryptoHash)
+ return cryptoHash
+}
+
+func (stateTrieTestWrapper *stateTrieTestWrapper) AddChangesForPersistence(writeBatch *gorocksdb.WriteBatch) {
+ err := stateTrieTestWrapper.stateTrie.AddChangesForPersistence(writeBatch)
+ testutil.AssertNoError(stateTrieTestWrapper.t, err, "Error while adding changes to db write-batch")
+}
+
+func (stateTrieTestWrapper *stateTrieTestWrapper) PersistChangesAndResetInMemoryChanges() {
+ writeBatch := gorocksdb.NewWriteBatch()
+ defer writeBatch.Destroy()
+ stateTrieTestWrapper.AddChangesForPersistence(writeBatch)
+ testDBWrapper.WriteToDB(stateTrieTestWrapper.t, writeBatch)
+ stateTrieTestWrapper.stateTrie.ClearWorkingSet(true)
+}
+
+type trieNodeTestWrapper struct {
+ trieNode *trieNode
+ t *testing.T
+}
+
+func (trieNodeTestWrapper *trieNodeTestWrapper) marshal() []byte {
+ serializedContent, err := trieNodeTestWrapper.trieNode.marshal()
+ testutil.AssertNoError(trieNodeTestWrapper.t, err, "Error while marshalling trieNode")
+ return serializedContent
+}
+
+func (trieNodeTestWrapper *trieNodeTestWrapper) unmarshal(key *trieKey, serializedContent []byte) *trieNode {
+ trieNode, err := unmarshalTrieNode(key, serializedContent)
+ testutil.AssertNoError(trieNodeTestWrapper.t, err, "Error while unmarshalling trieNode")
+ return trieNode
+}
+
+func TestMain(m *testing.M) {
+ testutil.SetupTestConfig()
+ os.Exit(m.Run())
+}
+
+func expectedCryptoHashForTest(key *trieKey, value []byte, childrenHashes ...[]byte) []byte {
+ expectedHash := []byte{}
+ if key != nil {
+ keyBytes := key.getEncodedBytes()
+ expectedHash = append(expectedHash, proto.EncodeVarint(uint64(len(keyBytes)))...)
+ expectedHash = append(expectedHash, keyBytes...)
+ expectedHash = append(expectedHash, value...)
+ }
+ for _, b := range childrenHashes {
+ expectedHash = append(expectedHash, b...)
+ }
+ return util.ComputeCryptoHash(expectedHash)
+}
diff --git a/core/ledger/statemgmt/trie/range_scan_iterator.go b/core/ledger/statemgmt/trie/range_scan_iterator.go
new file mode 100644
index 00000000000..1fb4a91db8a
--- /dev/null
+++ b/core/ledger/statemgmt/trie/range_scan_iterator.go
@@ -0,0 +1,83 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trie
+
+import (
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/tecbot/gorocksdb"
+)
+
+// RangeScanIterator implements the interface 'statemgmt.RangeScanIterator'
+type RangeScanIterator struct {
+ dbItr *gorocksdb.Iterator
+ chaincodeID string
+ endKey string
+ currentKey string
+ currentValue []byte
+ done bool
+}
+
+func newRangeScanIterator(chaincodeID string, startKey string, endKey string) (*RangeScanIterator, error) {
+ dbItr := db.GetDBHandle().GetStateCFIterator()
+ encodedStartKey := newTrieKey(chaincodeID, startKey).getEncodedBytes()
+ dbItr.Seek(encodedStartKey)
+ return &RangeScanIterator{dbItr, chaincodeID, endKey, "", nil, false}, nil
+}
+
+// Next - see interface 'statemgmt.RangeScanIterator' for details
+func (itr *RangeScanIterator) Next() bool {
+ if itr.done {
+ return false
+ }
+ for ; itr.dbItr.Valid(); itr.dbItr.Next() {
+
+ // making a copy of key-value bytes because, underlying key bytes are reused by itr.
+ // no need to free slices as iterator frees memory when closed.
+ trieKeyBytes := statemgmt.Copy(itr.dbItr.Key().Data())
+ trieNodeBytes := statemgmt.Copy(itr.dbItr.Value().Data())
+ value := unmarshalTrieNodeValue(trieNodeBytes)
+ if value == nil {
+ continue
+ }
+
+ // found an actual key
+ currentCompositeKey := trieKeyEncoderImpl.decodeTrieKeyBytes(statemgmt.Copy(trieKeyBytes))
+ currentChaincodeID, currentKey := statemgmt.DecodeCompositeKey(currentCompositeKey)
+ if currentChaincodeID == itr.chaincodeID && (itr.endKey == "" || currentKey <= itr.endKey) {
+ itr.currentKey = currentKey
+ itr.currentValue = value
+ itr.dbItr.Next()
+ return true
+ }
+
+ // retrieved all the keys in the given range
+ break
+ }
+ itr.done = true
+ return false
+}
+
+// GetKeyValue - see interface 'statemgmt.RangeScanIterator' for details
+func (itr *RangeScanIterator) GetKeyValue() (string, []byte) {
+ return itr.currentKey, itr.currentValue
+}
+
+// Close - see interface 'statemgmt.RangeScanIterator' for details
+func (itr *RangeScanIterator) Close() {
+ itr.dbItr.Close()
+}
diff --git a/core/ledger/statemgmt/trie/range_scan_iterator_test.go b/core/ledger/statemgmt/trie/range_scan_iterator_test.go
new file mode 100644
index 00000000000..1bfefea643f
--- /dev/null
+++ b/core/ledger/statemgmt/trie/range_scan_iterator_test.go
@@ -0,0 +1,123 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trie
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+func TestRangeScanIterator(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ stateTrieTestWrapper := newStateTrieTestWrapper(t)
+ stateTrie := stateTrieTestWrapper.stateTrie
+ stateDelta := statemgmt.NewStateDelta()
+
+ // insert keys
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+
+ stateDelta.Set("chaincodeID2", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID2", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID2", "key3", []byte("value3"), nil)
+ stateDelta.Set("chaincodeID2", "key4", []byte("value4"), nil)
+ stateDelta.Set("chaincodeID2", "key5", []byte("value5"), nil)
+ stateDelta.Set("chaincodeID2", "key6", []byte("value6"), nil)
+ stateDelta.Set("chaincodeID2", "key7", []byte("value7"), nil)
+
+ stateDelta.Set("chaincodeID3", "key1", []byte("value1"), nil)
+
+ stateDelta.Set("chaincodeID4", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID4", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID4", "key3", []byte("value3"), nil)
+ stateDelta.Set("chaincodeID4", "key4", []byte("value4"), nil)
+ stateDelta.Set("chaincodeID4", "key5", []byte("value5"), nil)
+ stateDelta.Set("chaincodeID4", "key6", []byte("value6"), nil)
+ stateDelta.Set("chaincodeID4", "key7", []byte("value7"), nil)
+
+ stateDelta.Set("chaincodeID5", "key1", []byte("value5"), nil)
+ stateDelta.Set("chaincodeID6", "key1", []byte("value6"), nil)
+
+ stateTrie.PrepareWorkingSet(stateDelta)
+ stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()
+
+ // test range scan for chaincodeID2
+ rangeScanItr, _ := stateTrie.GetRangeScanIterator("chaincodeID2", "key2", "key5")
+
+ var results = make(map[string][]byte)
+ for rangeScanItr.Next() {
+ key, value := rangeScanItr.GetKeyValue()
+ results[key] = value
+ }
+ t.Logf("Results = %s", results)
+ testutil.AssertEquals(t, len(results), 4)
+ testutil.AssertEquals(t, results["key2"], []byte("value2"))
+ testutil.AssertEquals(t, results["key3"], []byte("value3"))
+ testutil.AssertEquals(t, results["key4"], []byte("value4"))
+ testutil.AssertEquals(t, results["key5"], []byte("value5"))
+ rangeScanItr.Close()
+
+	// test range scan for chaincodeID2 with a different key range
+ rangeScanItr, _ = stateTrie.GetRangeScanIterator("chaincodeID2", "key3", "key6")
+ results = make(map[string][]byte)
+ for rangeScanItr.Next() {
+ key, value := rangeScanItr.GetKeyValue()
+ results[key] = value
+ }
+ t.Logf("Results = %s", results)
+ testutil.AssertEquals(t, len(results), 4)
+ testutil.AssertEquals(t, results["key3"], []byte("value3"))
+ testutil.AssertEquals(t, results["key4"], []byte("value4"))
+ testutil.AssertEquals(t, results["key5"], []byte("value5"))
+ testutil.AssertEquals(t, results["key6"], []byte("value6"))
+ rangeScanItr.Close()
+
+ // test range scan for chaincodeID2 starting from first key
+ rangeScanItr, _ = stateTrie.GetRangeScanIterator("chaincodeID2", "", "key5")
+ results = make(map[string][]byte)
+ for rangeScanItr.Next() {
+ key, value := rangeScanItr.GetKeyValue()
+ results[key] = value
+ }
+ t.Logf("Results = %s", results)
+ testutil.AssertEquals(t, len(results), 5)
+ testutil.AssertEquals(t, results["key1"], []byte("value1"))
+ testutil.AssertEquals(t, results["key2"], []byte("value2"))
+ testutil.AssertEquals(t, results["key3"], []byte("value3"))
+ testutil.AssertEquals(t, results["key4"], []byte("value4"))
+ testutil.AssertEquals(t, results["key5"], []byte("value5"))
+ rangeScanItr.Close()
+
+ // test range scan for all the keys in chaincodeID2 starting from first key
+ rangeScanItr, _ = stateTrie.GetRangeScanIterator("chaincodeID2", "", "")
+ results = make(map[string][]byte)
+ for rangeScanItr.Next() {
+ key, value := rangeScanItr.GetKeyValue()
+ results[key] = value
+ }
+ t.Logf("Results = %s", results)
+ testutil.AssertEquals(t, len(results), 7)
+ testutil.AssertEquals(t, results["key1"], []byte("value1"))
+ testutil.AssertEquals(t, results["key2"], []byte("value2"))
+ testutil.AssertEquals(t, results["key3"], []byte("value3"))
+ testutil.AssertEquals(t, results["key4"], []byte("value4"))
+ testutil.AssertEquals(t, results["key5"], []byte("value5"))
+ testutil.AssertEquals(t, results["key6"], []byte("value6"))
+ testutil.AssertEquals(t, results["key7"], []byte("value7"))
+ rangeScanItr.Close()
+}
diff --git a/core/ledger/statemgmt/trie/snapshot_iterator.go b/core/ledger/statemgmt/trie/snapshot_iterator.go
new file mode 100644
index 00000000000..b9f182f7070
--- /dev/null
+++ b/core/ledger/statemgmt/trie/snapshot_iterator.go
@@ -0,0 +1,69 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trie
+
+import (
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/tecbot/gorocksdb"
+)
+
+// StateSnapshotIterator implements the interface 'statemgmt.StateSnapshotIterator'
+type StateSnapshotIterator struct {
+ dbItr *gorocksdb.Iterator
+ currentKey []byte
+ currentValue []byte
+}
+
+func newStateSnapshotIterator(snapshot *gorocksdb.Snapshot) (*StateSnapshotIterator, error) {
+ dbItr := db.GetDBHandle().GetStateCFSnapshotIterator(snapshot)
+ dbItr.SeekToFirst()
+	// skip the root key, because the value-based test in the Next method is misleading for the root key's value field
+ dbItr.Next()
+ return &StateSnapshotIterator{dbItr, nil, nil}, nil
+}
+
+// Next - see interface 'statemgmt.StateSnapshotIterator' for details
+func (snapshotItr *StateSnapshotIterator) Next() bool {
+ var available bool
+ for ; snapshotItr.dbItr.Valid(); snapshotItr.dbItr.Next() {
+
+ // making a copy of key-value bytes because, underlying key bytes are reused by itr.
+ // no need to free slices as iterator frees memory when closed.
+ trieKeyBytes := statemgmt.Copy(snapshotItr.dbItr.Key().Data())
+ trieNodeBytes := statemgmt.Copy(snapshotItr.dbItr.Value().Data())
+ value := unmarshalTrieNodeValue(trieNodeBytes)
+ if value != nil {
+ snapshotItr.currentKey = trieKeyEncoderImpl.decodeTrieKeyBytes(statemgmt.Copy(trieKeyBytes))
+ snapshotItr.currentValue = value
+ available = true
+ snapshotItr.dbItr.Next()
+ break
+ }
+ }
+ return available
+}
+
+// GetRawKeyValue - see interface 'statemgmt.StateSnapshotIterator' for details
+func (snapshotItr *StateSnapshotIterator) GetRawKeyValue() ([]byte, []byte) {
+ return snapshotItr.currentKey, snapshotItr.currentValue
+}
+
+// Close - see interface 'statemgmt.StateSnapshotIterator' for details
+func (snapshotItr *StateSnapshotIterator) Close() {
+ snapshotItr.dbItr.Close()
+}
diff --git a/core/ledger/statemgmt/trie/snapshot_iterator_test.go b/core/ledger/statemgmt/trie/snapshot_iterator_test.go
new file mode 100644
index 00000000000..cefdcf56138
--- /dev/null
+++ b/core/ledger/statemgmt/trie/snapshot_iterator_test.go
@@ -0,0 +1,85 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trie
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+func TestStateSnapshotIterator(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ stateTrieTestWrapper := newStateTrieTestWrapper(t)
+ stateTrie := stateTrieTestWrapper.stateTrie
+ stateDelta := statemgmt.NewStateDelta()
+
+ // insert keys
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID2", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID3", "key3", []byte("value3"), nil)
+ stateDelta.Set("chaincodeID4", "key4", []byte("value4"), nil)
+ stateDelta.Set("chaincodeID5", "key5", []byte("value5"), nil)
+ stateDelta.Set("chaincodeID6", "key6", []byte("value6"), nil)
+ stateTrie.PrepareWorkingSet(stateDelta)
+ stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()
+ //check that the key is persisted
+ testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID1", "key1"), []byte("value1"))
+ testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID2", "key2"), []byte("value2"))
+ testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID3", "key3"), []byte("value3"))
+ testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID4", "key4"), []byte("value4"))
+ testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID5", "key5"), []byte("value5"))
+ testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID6", "key6"), []byte("value6"))
+
+	// take db snapshot
+ dbSnapshot := db.GetDBHandle().GetSnapshot()
+
+ stateDelta1 := statemgmt.NewStateDelta()
+ // delete a few keys
+ stateDelta1.Delete("chaincodeID1", "key1", nil)
+ stateDelta1.Delete("chaincodeID3", "key3", nil)
+ stateDelta1.Delete("chaincodeID4", "key4", nil)
+ stateDelta1.Delete("chaincodeID6", "key6", nil)
+
+ // update remaining keys
+ stateDelta1.Set("chaincodeID2", "key2", []byte("value2_new"), nil)
+ stateDelta1.Set("chaincodeID5", "key5", []byte("value5_new"), nil)
+
+ stateTrie.PrepareWorkingSet(stateDelta1)
+ stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()
+ //check that the keys are updated
+ testutil.AssertNil(t, stateTrieTestWrapper.Get("chaincodeID1", "key1"))
+ testutil.AssertNil(t, stateTrieTestWrapper.Get("chaincodeID3", "key3"))
+ testutil.AssertNil(t, stateTrieTestWrapper.Get("chaincodeID4", "key4"))
+ testutil.AssertNil(t, stateTrieTestWrapper.Get("chaincodeID6", "key6"))
+ testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID2", "key2"), []byte("value2_new"))
+ testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID5", "key5"), []byte("value5_new"))
+
+ itr, err := newStateSnapshotIterator(dbSnapshot)
+	testutil.AssertNoError(t, err, "Error while getting state snapshot iterator")
+
+ stateDeltaFromSnapshot := statemgmt.NewStateDelta()
+ for itr.Next() {
+ keyBytes, valueBytes := itr.GetRawKeyValue()
+ t.Logf("key=[%s], value=[%s]", string(keyBytes), string(valueBytes))
+ chaincodeID, key := statemgmt.DecodeCompositeKey(keyBytes)
+ stateDeltaFromSnapshot.Set(chaincodeID, key, valueBytes, nil)
+ }
+ testutil.AssertEquals(t, stateDelta, stateDeltaFromSnapshot)
+}
diff --git a/core/ledger/statemgmt/trie/state_trie.go b/core/ledger/statemgmt/trie/state_trie.go
new file mode 100644
index 00000000000..ad200e8006d
--- /dev/null
+++ b/core/ledger/statemgmt/trie/state_trie.go
@@ -0,0 +1,190 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trie
+
+import (
+ "fmt"
+
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/op/go-logging"
+ "github.com/tecbot/gorocksdb"
+)
+
+var stateTrieLogger = logging.MustGetLogger("stateTrie")
+var logHashOfEveryNode = false
+
+// StateTrie defines the trie for the state, a merkle tree where keys
+// and values are stored for fast hash computation.
+type StateTrie struct {
+ trieDelta *trieDelta
+ persistedStateHash []byte
+ lastComputedCryptoHash []byte
+ recomputeCryptoHash bool
+}
+
+// NewStateTrie constructs a new empty StateTrie
+func NewStateTrie() *StateTrie {
+ return &StateTrie{}
+}
+
+// Initialize the state trie with the root key
+func (stateTrie *StateTrie) Initialize(configs map[string]interface{}) error {
+ rootNode, err := fetchTrieNodeFromDB(rootTrieKey)
+ if err != nil {
+ panic(fmt.Errorf("Error in fetching root node from DB while initializing state trie: %s", err))
+ }
+ if rootNode != nil {
+ stateTrie.persistedStateHash = rootNode.computeCryptoHash()
+ stateTrie.lastComputedCryptoHash = stateTrie.persistedStateHash
+ }
+ return nil
+}
+
+// Get the value for a given chaincode ID and key
+func (stateTrie *StateTrie) Get(chaincodeID string, key string) ([]byte, error) {
+ trieNode, err := fetchTrieNodeFromDB(newTrieKey(chaincodeID, key))
+ if err != nil {
+ return nil, err
+ }
+ if trieNode == nil {
+ return nil, nil
+ }
+ return trieNode.value, nil
+}
+
+// PrepareWorkingSet creates the start of a new delta
+func (stateTrie *StateTrie) PrepareWorkingSet(stateDelta *statemgmt.StateDelta) error {
+ stateTrie.trieDelta = newTrieDelta(stateDelta)
+ stateTrie.recomputeCryptoHash = true
+ return nil
+}
+
+// ClearWorkingSet clears the existing delta
+func (stateTrie *StateTrie) ClearWorkingSet(changesPersisted bool) {
+ stateTrie.trieDelta = nil
+ stateTrie.recomputeCryptoHash = false
+
+ if changesPersisted {
+ stateTrie.persistedStateHash = stateTrie.lastComputedCryptoHash
+ } else {
+ stateTrie.lastComputedCryptoHash = stateTrie.persistedStateHash
+ }
+}
+
+// ComputeCryptoHash returns the hash of the current state trie
+func (stateTrie *StateTrie) ComputeCryptoHash() ([]byte, error) {
+ stateTrieLogger.Debug("Enter - ComputeCryptoHash()")
+ if !stateTrie.recomputeCryptoHash {
+ stateTrieLogger.Debug("No change since last time crypto-hash was computed. Returning result from last computation")
+ return stateTrie.lastComputedCryptoHash, nil
+ }
+ lowestLevel := stateTrie.trieDelta.getLowestLevel()
+ stateTrieLogger.Debugf("Lowest level in trieDelta = [%d]", lowestLevel)
+ for level := lowestLevel; level > 0; level-- {
+ changedNodes := stateTrie.trieDelta.deltaMap[level]
+ for _, changedNode := range changedNodes {
+ err := stateTrie.processChangedNode(changedNode)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ trieRootNode := stateTrie.trieDelta.getTrieRootNode()
+ if trieRootNode == nil {
+ return stateTrie.lastComputedCryptoHash, nil
+ }
+ stateTrie.lastComputedCryptoHash = trieRootNode.computeCryptoHash()
+ stateTrie.recomputeCryptoHash = false
+ hash := stateTrie.lastComputedCryptoHash
+ stateTrieLogger.Debug("Exit - ComputeCryptoHash()")
+ return hash, nil
+}
+
+func (stateTrie *StateTrie) processChangedNode(changedNode *trieNode) error {
+ stateTrieLogger.Debugf("Enter - processChangedNode() for node [%s]", changedNode)
+ dbNode, err := fetchTrieNodeFromDB(changedNode.trieKey)
+ if err != nil {
+ return err
+ }
+ if dbNode != nil {
+ stateTrieLogger.Debugf("processChangedNode() - merging attributes from db node [%s]", dbNode)
+ changedNode.mergeMissingAttributesFrom(dbNode)
+ }
+ newCryptoHash := changedNode.computeCryptoHash()
+ parentNode := stateTrie.trieDelta.getParentOf(changedNode)
+ if parentNode == nil {
+ parentNode = newTrieNode(changedNode.getParentTrieKey(), nil, false)
+ stateTrie.trieDelta.addTrieNode(parentNode)
+ }
+ parentNode.setChildCryptoHash(changedNode.getIndexInParent(), newCryptoHash)
+ if logHashOfEveryNode {
+ stateTrieLogger.Debugf("Hash for changedNode[%s]", changedNode)
+ stateTrieLogger.Debugf("%#v", newCryptoHash)
+ }
+ stateTrieLogger.Debugf("Exit - processChangedNode() for node [%s]", changedNode)
+ return nil
+}
+
+// AddChangesForPersistence commits current changes to the database
+func (stateTrie *StateTrie) AddChangesForPersistence(writeBatch *gorocksdb.WriteBatch) error {
+ if stateTrie.recomputeCryptoHash {
+ _, err := stateTrie.ComputeCryptoHash()
+ if err != nil {
+ return err
+ }
+ }
+
+ if stateTrie.trieDelta == nil {
+ stateTrieLogger.Info("trieDelta is nil. Not writing anything to DB")
+ return nil
+ }
+
+ openchainDB := db.GetDBHandle()
+ lowestLevel := stateTrie.trieDelta.getLowestLevel()
+ for level := lowestLevel; level >= 0; level-- {
+ changedNodes := stateTrie.trieDelta.deltaMap[level]
+ for _, changedNode := range changedNodes {
+ if changedNode.markedForDeletion {
+ writeBatch.DeleteCF(openchainDB.StateCF, changedNode.trieKey.getEncodedBytes())
+ continue
+ }
+ serializedContent, err := changedNode.marshal()
+ if err != nil {
+ return err
+ }
+ writeBatch.PutCF(openchainDB.StateCF, changedNode.trieKey.getEncodedBytes(), serializedContent)
+ }
+ }
+ stateTrieLogger.Debug("Added changes to DB")
+ return nil
+}
+
+// PerfHintKeyChanged is currently a no-op. Can perform pre-fetching of relevant data from db here.
+func (stateTrie *StateTrie) PerfHintKeyChanged(chaincodeID string, key string) {
+ // nothing for now. Can perform pre-fetching of relevant data from db here.
+}
+
+// GetStateSnapshotIterator - method implementation for interface 'statemgmt.HashableState'
+func (stateTrie *StateTrie) GetStateSnapshotIterator(snapshot *gorocksdb.Snapshot) (statemgmt.StateSnapshotIterator, error) {
+ return newStateSnapshotIterator(snapshot)
+}
+
+// GetRangeScanIterator returns an iterator for performing a range scan between the start and end keys
+func (stateTrie *StateTrie) GetRangeScanIterator(chaincodeID string, startKey string, endKey string) (statemgmt.RangeScanIterator, error) {
+ return newRangeScanIterator(chaincodeID, startKey, endKey)
+}
diff --git a/core/ledger/statemgmt/trie/state_trie_test.go b/core/ledger/statemgmt/trie/state_trie_test.go
new file mode 100644
index 00000000000..a9c2f21102f
--- /dev/null
+++ b/core/ledger/statemgmt/trie/state_trie_test.go
@@ -0,0 +1,246 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trie
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+// An empty state delta over an empty trie must hash to nil.
+func TestStateTrie_ComputeHash_AllInMemory_NoContents(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ stateTrie := NewStateTrie()
+ stateTrieTestWrapper := &stateTrieTestWrapper{stateTrie, t}
+ hash := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(statemgmt.NewStateDelta())
+ testutil.AssertEquals(t, hash, nil)
+}
+
+// Verifies the trie root hash against manually composed expected hashes,
+// without persisting anything to the DB.
+func TestStateTrie_ComputeHash_AllInMemory(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ stateTrie := NewStateTrie()
+ stateTrieTestWrapper := &stateTrieTestWrapper{stateTrie, t}
+ stateDelta := statemgmt.NewStateDelta()
+
+ // Test1 - Add a few keys
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID1", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID2", "key3", []byte("value3"), nil)
+ stateDelta.Set("chaincodeID2", "key4", []byte("value4"), nil)
+ // NOTE(review): the hash is computed twice here and the first result is
+ // discarded - looks redundant; confirm whether the double call is deliberate.
+ stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+ rootHash1 := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+
+ hash1 := expectedCryptoHashForTest(newTrieKey("chaincodeID1", "key1"), []byte("value1"))
+ hash2 := expectedCryptoHashForTest(newTrieKey("chaincodeID1", "key2"), []byte("value2"))
+ hash3 := expectedCryptoHashForTest(newTrieKey("chaincodeID2", "key3"), []byte("value3"))
+ hash4 := expectedCryptoHashForTest(newTrieKey("chaincodeID2", "key4"), []byte("value4"))
+
+ hash1Hash2 := expectedCryptoHashForTest(nil, nil, hash1, hash2)
+ hash3Hash4 := expectedCryptoHashForTest(nil, nil, hash3, hash4)
+ expectedRootHash1 := expectedCryptoHashForTest(nil, nil, hash1Hash2, hash3Hash4)
+ testutil.AssertEquals(t, rootHash1, expectedRootHash1)
+ stateTrie.ClearWorkingSet(true)
+
+ // Test2 - Add one more key
+ t.Logf("-- Add one more key exiting key --- ")
+ stateDelta.Set("chaincodeID3", "key5", []byte("value5"), nil)
+ rootHash2 := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+ hash5 := expectedCryptoHashForTest(newTrieKey("chaincodeID3", "key5"), []byte("value5"))
+ expectedRootHash2 := expectedCryptoHashForTest(nil, nil, hash1Hash2, hash3Hash4, hash5)
+ testutil.AssertEquals(t, rootHash2, expectedRootHash2)
+ stateTrie.ClearWorkingSet(true)
+
+ // Test3 - Remove one of the existing keys
+ t.Logf("-- Remove an exiting key --- ")
+ stateDelta.Delete("chaincodeID2", "key4", nil)
+ rootHash3 := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+ expectedRootHash3 := expectedCryptoHashForTest(nil, nil, hash1Hash2, hash3, hash5)
+ testutil.AssertEquals(t, rootHash3, expectedRootHash3)
+ stateTrie.ClearWorkingSet(true)
+}
+
+// Round-trips values through the DB and checks that an empty value ([]byte{})
+// is distinguishable from a missing key (nil).
+func TestStateTrie_GetSet_WithDB(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ stateTrie := NewStateTrie()
+ stateTrieTestWrapper := &stateTrieTestWrapper{stateTrie, t}
+ stateDelta := statemgmt.NewStateDelta()
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID1", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID2", "key3", []byte("value3"), nil)
+ stateDelta.Set("chaincodeID2", "key4", []byte("value4"), nil)
+ stateDelta.Set("chaincodeID3", "key5", []byte{}, nil)
+ stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+ stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()
+ testutil.AssertEquals(t, stateTrieTestWrapper.Get("chaincodeID1", "key1"), []byte("value1"))
+
+ // a key stored with an empty value must come back as an empty (non-nil) slice
+ emptyBytes := stateTrieTestWrapper.Get("chaincodeID3", "key5")
+ if emptyBytes == nil || len(emptyBytes) != 0 {
+ t.Fatalf("Expected an empty byte array. found = %#v", emptyBytes)
+ }
+ // a key never stored must come back as nil
+ nilVal := stateTrieTestWrapper.Get("chaincodeID3", "non-existing-key")
+ if nilVal != nil {
+ t.Fatalf("Expected a nil. found = %#v", nilVal)
+ }
+}
+
+// Exercises add/update/delete cycles against persisted state, re-deriving the
+// expected root hash by hand after every mutation.
+func TestStateTrie_ComputeHash_WithDB_Spread_Keys(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ stateTrie := NewStateTrie()
+ stateTrieTestWrapper := &stateTrieTestWrapper{stateTrie, t}
+
+ // Add a few keys and write to DB
+ stateDelta := statemgmt.NewStateDelta()
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1"), nil)
+ stateDelta.Set("chaincodeID1", "key2", []byte("value2"), nil)
+ stateDelta.Set("chaincodeID2", "key3", []byte("value3"), nil)
+ stateDelta.Set("chaincodeID2", "key4", []byte("value4"), nil)
+ stateTrie.PrepareWorkingSet(stateDelta)
+ stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()
+
+ /////////////////////////////////////////////////////////
+ // Test1 - Add a non-existing key
+ /////////////////////////////////////////////////////////
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Set("chaincodeID3", "key5", []byte("value5"), nil)
+ rootHash1 := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+ expectedHash1 := expectedCryptoHashForTest(newTrieKey("chaincodeID1", "key1"), []byte("value1"))
+ expectedHash2 := expectedCryptoHashForTest(newTrieKey("chaincodeID1", "key2"), []byte("value2"))
+ expectedHash3 := expectedCryptoHashForTest(newTrieKey("chaincodeID2", "key3"), []byte("value3"))
+ expectedHash4 := expectedCryptoHashForTest(newTrieKey("chaincodeID2", "key4"), []byte("value4"))
+ expectedHash1Hash2 := expectedCryptoHashForTest(nil, nil, expectedHash1, expectedHash2)
+ expectedHash3Hash4 := expectedCryptoHashForTest(nil, nil, expectedHash3, expectedHash4)
+ expectedHash5 := expectedCryptoHashForTest(newTrieKey("chaincodeID3", "key5"), []byte("value5"))
+ expectedRootHash1 := expectedCryptoHashForTest(nil, nil, expectedHash1Hash2, expectedHash3Hash4, expectedHash5)
+ testutil.AssertEquals(t, rootHash1, expectedRootHash1)
+ stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()
+
+ /////////////////////////////////////////////////////////
+ // Test2 - Change value of an existing key
+ /////////////////////////////////////////////////////////
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Set("chaincodeID2", "key4", []byte("value4-new"), nil)
+ rootHash2 := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+ expectedHash4 = expectedCryptoHashForTest(newTrieKey("chaincodeID2", "key4"), []byte("value4-new"))
+ expectedHash3Hash4 = expectedCryptoHashForTest(nil, nil, expectedHash3, expectedHash4)
+ expectedRootHash2 := expectedCryptoHashForTest(nil, nil, expectedHash1Hash2, expectedHash3Hash4, expectedHash5)
+ testutil.AssertEquals(t, rootHash2, expectedRootHash2)
+ stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()
+
+ /////////////////////////////////////////////////////////
+ // Test3 - Change value of another existing key
+ /////////////////////////////////////////////////////////
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Set("chaincodeID1", "key1", []byte("value1-new"), nil)
+ rootHash3 := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+ expectedHash1 = expectedCryptoHashForTest(newTrieKey("chaincodeID1", "key1"), []byte("value1-new"))
+ expectedHash1Hash2 = expectedCryptoHashForTest(nil, nil, expectedHash1, expectedHash2)
+ expectedRootHash3 := expectedCryptoHashForTest(nil, nil, expectedHash1Hash2, expectedHash3Hash4, expectedHash5)
+ testutil.AssertEquals(t, rootHash3, expectedRootHash3)
+ stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()
+
+ /////////////////////////////////////////////////////////
+ // Test4 - Delete an existing key
+ /////////////////////////////////////////////////////////
+ t.Logf("-- Delete an existing key ---")
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Delete("chaincodeID3", "key5", nil)
+ rootHash4 := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+ expectedRootHash4 := expectedCryptoHashForTest(nil, nil, expectedHash1Hash2, expectedHash3Hash4)
+ testutil.AssertEquals(t, rootHash4, expectedRootHash4)
+ stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()
+ // Delete should remove the key from db because this key has no value and no children
+ testutil.AssertNil(t, testDBWrapper.GetFromStateCF(t, newTrieKey("chaincodeID3", "key5").getEncodedBytes()))
+
+ /////////////////////////////////////////////////////////
+ // Test5 - Delete another existing key
+ /////////////////////////////////////////////////////////
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Delete("chaincodeID2", "key4", nil)
+ rootHash5 := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+ expectedRootHash5 := expectedCryptoHashForTest(nil, nil, expectedHash1Hash2, expectedHash3)
+ testutil.AssertEquals(t, rootHash5, expectedRootHash5)
+ stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()
+ testutil.AssertNil(t, testDBWrapper.GetFromStateCF(t, newTrieKey("chaincodeID2", "key4").getEncodedBytes()))
+}
+
+// Uses keys that are prefixes of one another ("key1", "key", "k") so that
+// trie nodes carry both a value and children, exercising hash propagation
+// through intermediate nodes.
+func TestStateTrie_ComputeHash_WithDB_Staggered_Keys(t *testing.T) {
+ testDBWrapper.CleanDB(t)
+ stateTrie := NewStateTrie()
+ stateTrieTestWrapper := &stateTrieTestWrapper{stateTrie, t}
+
+ /////////////////////////////////////////////////////////
+ // Test1 - Add a few staggered keys
+ /////////////////////////////////////////////////////////
+ stateDelta := statemgmt.NewStateDelta()
+ stateDelta.Set("ID", "key1", []byte("value_key1"), nil)
+ stateDelta.Set("ID", "key", []byte("value_key"), nil)
+ stateDelta.Set("ID", "k", []byte("value_k"), nil)
+ expectedHashKey1 := expectedCryptoHashForTest(newTrieKey("ID", "key1"), []byte("value_key1"))
+ expectedHashKey := expectedCryptoHashForTest(newTrieKey("ID", "key"), []byte("value_key"), expectedHashKey1)
+ expectedHashK := expectedCryptoHashForTest(newTrieKey("ID", "k"), []byte("value_k"), expectedHashKey)
+ rootHash1 := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+ testutil.AssertEquals(t, rootHash1, expectedHashK)
+ stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()
+
+ /////////////////////////////////////////////////////////
+ // Test2 - Add a new key in path of existing staggered keys
+ /////////////////////////////////////////////////////////
+ t.Logf("- Add a new key in path of existing staggered keys -")
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Set("ID", "ke", []byte("value_ke"), nil)
+ expectedHashKe := expectedCryptoHashForTest(newTrieKey("ID", "ke"), []byte("value_ke"), expectedHashKey)
+ expectedHashK = expectedCryptoHashForTest(newTrieKey("ID", "k"), []byte("value_k"), expectedHashKe)
+ rootHash2 := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+ testutil.AssertEquals(t, rootHash2, expectedHashK)
+ stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()
+
+ /////////////////////////////////////////////////////////
+ // Test3 - Change value of one of the existing keys
+ /////////////////////////////////////////////////////////
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Set("ID", "ke", []byte("value_ke_new"), nil)
+ expectedHashKe = expectedCryptoHashForTest(newTrieKey("ID", "ke"), []byte("value_ke_new"), expectedHashKey)
+ expectedHashK = expectedCryptoHashForTest(newTrieKey("ID", "k"), []byte("value_k"), expectedHashKe)
+ rootHash3 := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+ testutil.AssertEquals(t, rootHash3, expectedHashK)
+ stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()
+
+ /////////////////////////////////////////////////////////
+ // Test4 - delete one of the existing keys
+ /////////////////////////////////////////////////////////
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Delete("ID", "ke", nil)
+ expectedHashK = expectedCryptoHashForTest(newTrieKey("ID", "k"), []byte("value_k"), expectedHashKey)
+ rootHash4 := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+ testutil.AssertEquals(t, rootHash4, expectedHashK)
+ stateTrieTestWrapper.PersistChangesAndResetInMemoryChanges()
+ // Delete should not remove the key from db because this key has children
+ testutil.AssertNotNil(t, testDBWrapper.GetFromStateCF(t, newTrieKey("ID", "ke").getEncodedBytes()))
+ testutil.AssertEquals(t, rootHash1, rootHash4)
+
+ //////////////////////////////////////////////////////////////
+ // Test5 - Add one more key as a sibling of an intermediate node
+ //////////////////////////////////////////////////////////////
+ stateDelta = statemgmt.NewStateDelta()
+ stateDelta.Set("ID", "kez", []byte("value_kez"), nil)
+ expectedHashKez := expectedCryptoHashForTest(newTrieKey("ID", "kez"), []byte("value_kez"))
+ expectedHashKe = expectedCryptoHashForTest(nil, nil, expectedHashKey, expectedHashKez)
+ expectedHashK = expectedCryptoHashForTest(newTrieKey("ID", "k"), []byte("value_k"), expectedHashKe)
+ rootHash5 := stateTrieTestWrapper.PrepareWorkingSetAndComputeCryptoHash(stateDelta)
+ testutil.AssertEquals(t, rootHash5, expectedHashK)
+}
diff --git a/core/ledger/statemgmt/trie/test.yaml b/core/ledger/statemgmt/trie/test.yaml
new file mode 100644
index 00000000000..8e91abd9353
--- /dev/null
+++ b/core/ledger/statemgmt/trie/test.yaml
@@ -0,0 +1,8 @@
+###############################################################################
+#
+# Peer section
+#
+###############################################################################
+peer:
+ # Path on the file system where peer will store data
+ fileSystemPath: /var/hyperledger/test/ledger/statemgmt/trie/testdb
diff --git a/core/ledger/statemgmt/trie/trie_db_helper.go b/core/ledger/statemgmt/trie/trie_db_helper.go
new file mode 100644
index 00000000000..9564ad45a2e
--- /dev/null
+++ b/core/ledger/statemgmt/trie/trie_db_helper.go
@@ -0,0 +1,41 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trie
+
+import "github.com/hyperledger/fabric/core/db"
+
+// fetchTrieNodeFromDB loads and unmarshals the trie node stored under key.
+// Returns (nil, nil) when no node exists for the key - absence is not an error.
+func fetchTrieNodeFromDB(key *trieKey) (*trieNode, error) {
+ stateTrieLogger.Debugf("Enter fetchTrieNodeFromDB() for trieKey [%s]", key)
+ openchainDB := db.GetDBHandle()
+ trieNodeBytes, err := openchainDB.GetFromStateCF(key.getEncodedBytes())
+ if err != nil {
+ stateTrieLogger.Errorf("Error in retrieving trie node from DB for triekey [%s]. Error:%s", key, err)
+ return nil, err
+ }
+
+ // not found in DB
+ if trieNodeBytes == nil {
+ return nil, nil
+ }
+
+ trieNode, err := unmarshalTrieNode(key, trieNodeBytes)
+ if err != nil {
+ stateTrieLogger.Errorf("Error in unmarshalling trie node for triekey [%s]. Error:%s", key, err)
+ return nil, err
+ }
+ stateTrieLogger.Debugf("Exit fetchTrieNodeFromDB() for trieKey [%s]", key)
+ return trieNode, nil
+}
diff --git a/core/ledger/statemgmt/trie/trie_delta.go b/core/ledger/statemgmt/trie/trie_delta.go
new file mode 100644
index 00000000000..e28170c7048
--- /dev/null
+++ b/core/ledger/statemgmt/trie/trie_delta.go
@@ -0,0 +1,105 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trie
+
+import (
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+)
+
+// levelDeltaMap holds the changed nodes of one trie level, keyed by the
+// string form of each node's encoded trie-key.
+type levelDeltaMap map[string]*trieNode
+
+// trieDelta accumulates changed trie nodes grouped by level. lowestLevel is
+// the numerically largest level present in deltaMap, i.e. the deepest level
+// of the trie touched by this delta (see addTrieNode).
+type trieDelta struct {
+ lowestLevel int
+ deltaMap map[int]levelDeltaMap
+}
+
+// newLevelDeltaMap returns an empty levelDeltaMap.
+func newLevelDeltaMap() levelDeltaMap {
+ return levelDeltaMap(make(map[string]*trieNode))
+}
+
+// newTrieDelta builds a trieDelta from a statemgmt.StateDelta: every update
+// becomes a leaf-level set, every delete a nil-valued set. When the state
+// delta represents a rollback (RollBackwards), the previous values are used
+// instead of the new values.
+func newTrieDelta(stateDelta *statemgmt.StateDelta) *trieDelta {
+ trieDelta := &trieDelta{0, make(map[int]levelDeltaMap)}
+ chaincodes := stateDelta.GetUpdatedChaincodeIds(false)
+ for _, chaincodeID := range chaincodes {
+ updates := stateDelta.GetUpdates(chaincodeID)
+ for key, updatedvalue := range updates {
+ if updatedvalue.IsDelete() {
+ trieDelta.delete(chaincodeID, key)
+ } else {
+ if stateDelta.RollBackwards {
+ trieDelta.set(chaincodeID, key, updatedvalue.GetPreviousValue())
+ } else {
+ trieDelta.set(chaincodeID, key, updatedvalue.GetValue())
+ }
+ }
+ }
+ }
+ return trieDelta
+}
+
+// getLowestLevel returns the deepest trie level (largest level number) present in the delta.
+func (trieDelta *trieDelta) getLowestLevel() int {
+ return trieDelta.lowestLevel
+}
+
+// getChangesAtLevel returns the changed nodes recorded at the given level.
+// Order is unspecified (map iteration). Returns an empty slice when the level
+// has no changes.
+func (trieDelta *trieDelta) getChangesAtLevel(level int) []*trieNode {
+ levelDelta := trieDelta.deltaMap[level]
+ // allocate with zero length and a capacity hint: allocating with a non-zero
+ // length and then appending would leave a prefix of nil entries in the result
+ changedNodes := make([]*trieNode, 0, len(levelDelta))
+ for _, v := range levelDelta {
+ changedNodes = append(changedNodes, v)
+ }
+ return changedNodes
+}
+
+// getParentOf returns the parent node of trieNode if the parent is already
+// present in this delta; returns nil otherwise (including a missing parent level).
+func (trieDelta *trieDelta) getParentOf(trieNode *trieNode) *trieNode {
+ parentLevel := trieNode.getParentLevel()
+ parentTrieKey := trieNode.getParentTrieKey()
+ levelDeltaMap := trieDelta.deltaMap[parentLevel]
+ if levelDeltaMap == nil {
+ return nil
+ }
+ return levelDeltaMap[parentTrieKey.getEncodedBytesAsStr()]
+}
+
+// addTrieNode inserts (or replaces) trieNode in the level map for its level,
+// creating the level map lazily, and advances lowestLevel if this node is
+// deeper than any seen so far (levels grow numerically with depth).
+func (trieDelta *trieDelta) addTrieNode(trieNode *trieNode) {
+ level := trieNode.getLevel()
+ levelDeltaMap := trieDelta.deltaMap[level]
+ if levelDeltaMap == nil {
+ levelDeltaMap = newLevelDeltaMap()
+ trieDelta.deltaMap[level] = levelDeltaMap
+ }
+ levelDeltaMap[trieNode.trieKey.getEncodedBytesAsStr()] = trieNode
+ if level > trieDelta.lowestLevel {
+ trieDelta.lowestLevel = level
+ }
+}
+
+// getTrieRootNode returns the root node (level 0) from the delta, or nil if
+// the delta does not contain the root.
+func (trieDelta *trieDelta) getTrieRootNode() *trieNode {
+ levelZeroMap := trieDelta.deltaMap[0]
+ if levelZeroMap == nil {
+ return nil
+ }
+ return levelZeroMap[rootTrieKeyStr]
+}
+
+// set records an updated value for (chaincodeID, key) as a leaf-level node
+// marked as updated. Parameter renamed chaincodeId -> chaincodeID for golint
+// and consistency with the rest of this package.
+func (trieDelta *trieDelta) set(chaincodeID string, key string, value []byte) {
+ trieNode := newTrieNode(newTrieKey(chaincodeID, key), value, true)
+ trieDelta.addTrieNode(trieNode)
+}
+
+// delete records a deletion for (chaincodeID, key); a nil value marks the key
+// as deleted. Parameter renamed chaincodeId -> chaincodeID for golint.
+func (trieDelta *trieDelta) delete(chaincodeID string, key string) {
+ trieDelta.set(chaincodeID, key, nil)
+}
diff --git a/core/ledger/statemgmt/trie/trie_key.go b/core/ledger/statemgmt/trie/trie_key.go
new file mode 100644
index 00000000000..a57015a6b61
--- /dev/null
+++ b/core/ledger/statemgmt/trie/trie_key.go
@@ -0,0 +1,102 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trie
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+)
+
+// trieKeyEncoder abstracts how a composite key is split into trie levels:
+// it builds a trieKeyInterface from the original bytes, reports the fan-out
+// of the trie, and decodes encoded key bytes back to the original bytes.
+type trieKeyEncoder interface {
+ newTrieKey(originalBytes []byte) trieKeyInterface
+ getMaxTrieWidth() int
+ decodeTrieKeyBytes(encodedBytes []byte) (originalBytes []byte)
+}
+
+// trieKeyInterface is the encoder-specific representation of a key position
+// in the trie: its depth, its parent, its index within the parent, and its
+// encoded byte form.
+type trieKeyInterface interface {
+ getLevel() int
+ getParentTrieKey() trieKeyInterface
+ getIndexInParent() int
+ getEncodedBytes() []byte
+}
+
+// The byte-per-level encoder is the single encoder implementation in use.
+var trieKeyEncoderImpl trieKeyEncoder = newByteTrieKeyEncoder()
+// The root key is the empty byte slice (see trieKey.isRootKey).
+var rootTrieKeyBytes = []byte{}
+var rootTrieKeyStr = string(rootTrieKeyBytes)
+var rootTrieKey = newTrieKeyFromCompositeKey(rootTrieKeyBytes)
+
+// trieKey wraps the encoder-specific key behind a small concrete type.
+type trieKey struct {
+ trieKeyImpl trieKeyInterface
+}
+
+// newTrieKey builds the trie key for (chaincodeID, key) via the composite-key encoding.
+func newTrieKey(chaincodeID string, key string) *trieKey {
+ compositeKey := statemgmt.ConstructCompositeKey(chaincodeID, key)
+ return newTrieKeyFromCompositeKey(compositeKey)
+}
+
+// newTrieKeyFromCompositeKey wraps already-composed key bytes in a trieKey.
+func newTrieKeyFromCompositeKey(compositeKey []byte) *trieKey {
+ return &trieKey{trieKeyEncoderImpl.newTrieKey(compositeKey)}
+}
+
+// decodeTrieKeyBytes recovers the original composite-key bytes from their encoded form.
+func decodeTrieKeyBytes(encodedBytes []byte) []byte {
+ return trieKeyEncoderImpl.decodeTrieKeyBytes(encodedBytes)
+}
+
+// getEncodedBytes returns the encoded byte form of the key.
+func (key *trieKey) getEncodedBytes() []byte {
+ return key.trieKeyImpl.getEncodedBytes()
+}
+
+// getLevel returns the trie depth of this key (root is level 0).
+func (key *trieKey) getLevel() int {
+ return key.trieKeyImpl.getLevel()
+}
+
+// getIndexInParent returns this key's child-index within its parent node.
+// Panics if called on the root key, which has no parent.
+func (key *trieKey) getIndexInParent() int {
+ if key.isRootKey() {
+ panic(fmt.Errorf("Parent for Trie root should not be asked for"))
+ }
+ return key.trieKeyImpl.getIndexInParent()
+}
+
+// getParentTrieKey returns the key of this key's parent node.
+// Panics if called on the root key, which has no parent.
+func (key *trieKey) getParentTrieKey() *trieKey {
+ if key.isRootKey() {
+ panic(fmt.Errorf("Parent for Trie root should not be asked for"))
+ }
+ return &trieKey{key.trieKeyImpl.getParentTrieKey()}
+}
+
+// getEncodedBytesAsStr returns the encoded key bytes as a string, suitable for use as a map key.
+func (key *trieKey) getEncodedBytesAsStr() string {
+ return string(key.trieKeyImpl.getEncodedBytes())
+}
+
+// isRootKey reports whether this is the trie root key (empty encoded bytes).
+func (key *trieKey) isRootKey() bool {
+ return len(key.getEncodedBytes()) == 0
+}
+
+// getParentLevel returns the trie level of this key's parent (one less than
+// this key's level). Panics if called on the root key, which has no parent.
+func (key *trieKey) getParentLevel() int {
+ if key.isRootKey() {
+ panic(fmt.Errorf("Parent for Trie root should not be asked for"))
+ }
+ return key.getLevel() - 1
+}
+
+// assertIsChildOf panics unless parentTrieKey is the direct parent of this key.
+func (key *trieKey) assertIsChildOf(parentTrieKey *trieKey) {
+ if !bytes.Equal(key.getParentTrieKey().getEncodedBytes(), parentTrieKey.getEncodedBytes()) {
+ panic(fmt.Errorf("trie key [%s] is not a child of trie key [%s]", key, parentTrieKey))
+ }
+}
diff --git a/core/ledger/statemgmt/trie/trie_node.go b/core/ledger/statemgmt/trie/trie_node.go
new file mode 100644
index 00000000000..268fba4657e
--- /dev/null
+++ b/core/ledger/statemgmt/trie/trie_node.go
@@ -0,0 +1,243 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trie
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/util"
+)
+
+// trieNode is an in-memory node of the state trie: its key, an optional value,
+// and the crypto-hashes of its children indexed by child position.
+// valueUpdated / childrenCryptoHashesUpdated track which attributes were
+// changed in the current working set (as opposed to loaded from the DB) -
+// see mergeMissingAttributesFrom. markedForDeletion is set by
+// computeCryptoHash when the node ends up with no value and no children.
+type trieNode struct {
+ trieKey *trieKey
+ value []byte
+ childrenCryptoHashes map[int][]byte
+
+ valueUpdated bool
+ childrenCryptoHashesUpdated map[int]bool
+ markedForDeletion bool
+}
+
+// newTrieNode creates a node for key holding value; updated indicates whether
+// the value comes from the current working set (true) or from the DB (false).
+func newTrieNode(key *trieKey, value []byte, updated bool) *trieNode {
+ return &trieNode{
+ trieKey: key,
+ value: value,
+ childrenCryptoHashes: make(map[int][]byte),
+
+ valueUpdated: updated,
+ childrenCryptoHashesUpdated: make(map[int]bool),
+ }
+}
+
+// getLevel returns the trie depth of this node (root is level 0).
+func (trieNode *trieNode) getLevel() int {
+ return trieNode.trieKey.getLevel()
+}
+
+// isRootNode reports whether this node is the trie root.
+func (trieNode *trieNode) isRootNode() bool {
+ return trieNode.trieKey.isRootKey()
+}
+
+// setChildCryptoHash records the crypto-hash of the child at the given index
+// and marks that slot as updated. A nil hash only marks the slot updated
+// (the existing entry, if any, is left as is). Panics on an out-of-range index.
+func (trieNode *trieNode) setChildCryptoHash(index int, childCryptoHash []byte) {
+ if index >= trieKeyEncoderImpl.getMaxTrieWidth() {
+ panic(fmt.Errorf("Index for child crypto-hash must be less than [%d]. Tried to access index value [%d]", trieKeyEncoderImpl.getMaxTrieWidth(), index))
+ }
+ if childCryptoHash != nil {
+ trieNode.childrenCryptoHashes[index] = childCryptoHash
+ }
+ trieNode.childrenCryptoHashesUpdated[index] = true
+}
+
+// getParentTrieKey returns the key of this node's parent (delegates to trieKey).
+func (trieNode *trieNode) getParentTrieKey() *trieKey {
+ return trieNode.trieKey.getParentTrieKey()
+}
+
+// getParentLevel returns the trie level of this node's parent.
+func (trieNode *trieNode) getParentLevel() int {
+ return trieNode.trieKey.getParentLevel()
+}
+
+// getIndexInParent returns this node's child-index within its parent.
+func (trieNode *trieNode) getIndexInParent() int {
+ return trieNode.trieKey.getIndexInParent()
+}
+
+// mergeMissingAttributesFrom copies, from the DB-loaded dbTrieNode, the value
+// and the child hashes that were NOT updated in the current working set, so
+// that this node carries a complete picture before hashing/persisting.
+func (trieNode *trieNode) mergeMissingAttributesFrom(dbTrieNode *trieNode) {
+ stateTrieLogger.Debugf("Enter mergeMissingAttributesFrom() baseNode=[%s], mergeNode=[%s]", trieNode, dbTrieNode)
+ if !trieNode.valueUpdated {
+ trieNode.value = dbTrieNode.value
+ }
+ for k, v := range dbTrieNode.childrenCryptoHashes {
+ if !trieNode.childrenCryptoHashesUpdated[k] {
+ trieNode.childrenCryptoHashes[k] = v
+ }
+ }
+ stateTrieLogger.Debugf("Exit mergeMissingAttributesFrom() mergedNode=[%s]", trieNode)
+}
+
+// computeCryptoHash derives this node's crypto-hash from its value and its
+// children's hashes. The hashed content is: varint(len(encodedKey)) ||
+// encodedKey || value (when the node holds a value), followed by the child
+// hashes in ascending child-index order. Special cases:
+// - no value and no children: returns nil and marks the node for deletion;
+// - no value and exactly one child: propagates the child hash unchanged.
+func (trieNode *trieNode) computeCryptoHash() []byte {
+ stateTrieLogger.Debugf("Enter computeCryptoHash() for trieNode [%s]", trieNode)
+ var cryptoHashContent []byte
+ if trieNode.containsValue() {
+ stateTrieLogger.Debugf("Adding value to hash computation for trieNode [%s]", trieNode)
+ key := trieNode.trieKey.getEncodedBytes()
+ cryptoHashContent = append(cryptoHashContent, proto.EncodeVarint(uint64(len(key)))...)
+ cryptoHashContent = append(cryptoHashContent, key...)
+ cryptoHashContent = append(cryptoHashContent, trieNode.value...)
+ }
+
+ sortedChildrenIndexes := trieNode.getSortedChildrenIndex()
+ for _, index := range sortedChildrenIndexes {
+ childCryptoHash := trieNode.childrenCryptoHashes[index]
+ stateTrieLogger.Debugf("Adding hash [%#v] for child number [%d] to hash computation for trieNode [%s]", childCryptoHash, index, trieNode)
+ cryptoHashContent = append(cryptoHashContent, childCryptoHash...)
+ }
+
+ if cryptoHashContent == nil {
+ // node has no associated value and no associated children.
+ stateTrieLogger.Debugf("Returning nil as hash for trieNode = [%s]. Also, marking this key for deletion.", trieNode)
+ trieNode.markedForDeletion = true
+ return nil
+ }
+
+ if !trieNode.containsValue() && trieNode.getNumChildren() == 1 {
+ // node has no associated value and has a single child. Propagate the child hash up
+ stateTrieLogger.Debugf("Returning hash as of a single child for trieKey = [%s]", trieNode.trieKey)
+ return cryptoHashContent
+ }
+
+ stateTrieLogger.Debugf("Recomputing hash for trieKey = [%s]", trieNode)
+ return util.ComputeCryptoHash(cryptoHashContent)
+}
+
+// containsValue reports whether this node holds a value. The root node never
+// counts as holding a value, regardless of its value field.
+func (trieNode *trieNode) containsValue() bool {
+ if trieNode.isRootNode() {
+ return false
+ }
+ return trieNode.value != nil
+}
+
+// marshal serializes the node as: varint value-marker (1 = value present,
+// 0 = nil value), the value bytes (when present), the number of child hashes,
+// then (index, hash) pairs for each child. See unmarshalTrieNode for the inverse.
+func (trieNode *trieNode) marshal() ([]byte, error) {
+ buffer := proto.NewBuffer([]byte{})
+
+ // write value marker explicitly because rocksdb apis convert a nil into an empty array and protobuf does it the other way around
+ var valueMarker uint64 = 0 // ignore golint warning. Dropping '= 0' makes assignment less clear
+ if trieNode.value != nil {
+ valueMarker = 1
+ }
+ err := buffer.EncodeVarint(valueMarker)
+ if err != nil {
+ return nil, err
+ }
+ if trieNode.value != nil {
+ // write value
+ err = buffer.EncodeRawBytes(trieNode.value)
+ if err != nil {
+ return nil, err
+ }
+ }
+ //write number of crypto-hashes
+ numCryptoHashes := trieNode.getNumChildren()
+ err = buffer.EncodeVarint(uint64(numCryptoHashes))
+ if err != nil {
+ return nil, err
+ }
+
+ if numCryptoHashes == 0 {
+ return buffer.Bytes(), nil
+ }
+
+ // NOTE: map iteration order is non-deterministic, so the (index, hash)
+ // pairs are written in arbitrary order; unmarshalTrieNode re-keys by index.
+ for i, cryptoHash := range trieNode.childrenCryptoHashes {
+ //write crypto-hash Index
+ err = buffer.EncodeVarint(uint64(i))
+ if err != nil {
+ return nil, err
+ }
+ // write crypto-hash
+ err = buffer.EncodeRawBytes(cryptoHash)
+ if err != nil {
+ return nil, err
+ }
+ }
+ serializedBytes := buffer.Bytes()
+ stateTrieLogger.Debugf("Marshalled trieNode [%s]. Serialized bytes size = %d", trieNode.trieKey, len(serializedBytes))
+ return serializedBytes, nil
+}
+
+// unmarshalTrieNode reconstructs a trieNode (with updated flags unset) from
+// bytes produced by marshal(). key supplies the node's position in the trie.
+func unmarshalTrieNode(key *trieKey, serializedContent []byte) (*trieNode, error) {
+ stateTrieLogger.Debugf("key = [%s], len(serializedContent) = %d", key, len(serializedContent))
+ trieNode := newTrieNode(key, nil, false)
+ buffer := proto.NewBuffer(serializedContent)
+ trieNode.value = unmarshalTrieNodeValueFromBuffer(buffer)
+
+ numCryptoHashes, err := buffer.DecodeVarint()
+ stateTrieLogger.Debugf("numCryptoHashes = [%d]", numCryptoHashes)
+ if err != nil {
+ return nil, err
+ }
+ // read the (index, hash) pairs written by marshal()
+ for i := uint64(0); i < numCryptoHashes; i++ {
+ index, err := buffer.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+ cryptoHash, err := buffer.DecodeRawBytes(false)
+ if err != nil {
+ return nil, err
+ }
+ trieNode.childrenCryptoHashes[int(index)] = cryptoHash
+ }
+ stateTrieLogger.Debugf("unmarshalled trieNode = [%s]", trieNode)
+ return trieNode, nil
+}
+
+// unmarshalTrieNodeValue extracts only the value portion of a serialized trie node.
+func unmarshalTrieNodeValue(serializedContent []byte) []byte {
+ return unmarshalTrieNodeValueFromBuffer(proto.NewBuffer(serializedContent))
+}
+
+// unmarshalTrieNodeValueFromBuffer reads the value marker and, when the marker
+// is non-zero, the value bytes. Returns nil for an absent value. Panics on a
+// malformed buffer - the serialized form is produced by marshal() and is not
+// expected to be corrupt.
+func unmarshalTrieNodeValueFromBuffer(buffer *proto.Buffer) []byte {
+ valueMarker, err := buffer.DecodeVarint()
+ if err != nil {
+ panic(fmt.Errorf("This error is not expected: %s", err))
+ }
+ if valueMarker == 0 {
+ return nil
+ }
+ value, err := buffer.DecodeRawBytes(false)
+ if err != nil {
+ panic(fmt.Errorf("This error is not expected: %s", err))
+ }
+ return value
+}
+
+// String renders the node's key, value, and child-hash count for logging.
+func (trieNode *trieNode) String() string {
+ return fmt.Sprintf("trieKey=[%s], value=[%#v], Num children hashes=[%#v]",
+ trieNode.trieKey, trieNode.value, trieNode.getNumChildren())
+}
+
+// getNumChildren returns how many child hashes this node currently holds.
+func (trieNode *trieNode) getNumChildren() int {
+ return len(trieNode.childrenCryptoHashes)
+}
+
+// getSortedChildrenIndex returns the child indexes in ascending order, giving
+// computeCryptoHash a deterministic iteration order over the children map.
+func (trieNode *trieNode) getSortedChildrenIndex() []int {
+ keys := make([]int, trieNode.getNumChildren())
+ i := 0
+ for k := range trieNode.childrenCryptoHashes {
+ keys[i] = k
+ i++
+ }
+ sort.Ints(keys)
+ return keys
+}
diff --git a/core/ledger/statemgmt/trie/trie_node_test.go b/core/ledger/statemgmt/trie/trie_node_test.go
new file mode 100644
index 00000000000..a5d9906b80f
--- /dev/null
+++ b/core/ledger/statemgmt/trie/trie_node_test.go
@@ -0,0 +1,140 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trie
+
+import (
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/testutil"
+)
+
+func TestTrieNode_MarshalUnmarshal_NoValue_NoChildren(t *testing.T) {
+	// Round-trip a node holding an empty value and no children.
+	node := newTrieNode(newTrieKey("chaincodeID", "key"), []byte{}, false)
+	testTrieNodeMarshalUnmarshal(node, t)
+}
+
+func TestTrieNode_MarshalUnmarshal_WithValue(t *testing.T) {
+	// Round-trip a node carrying a non-empty value.
+	node := newTrieNode(newTrieKey("chaincodeID", "key"), []byte("Hello!"), false)
+	testTrieNodeMarshalUnmarshal(node, t)
+}
+
+func TestTrieNode_MarshalUnmarshal_WithChildren(t *testing.T) {
+	// Round-trip a node with a value and children at both ends of the index range.
+	node := newTrieNode(newTrieKey("chaincodeID", "key"), []byte("Hello!"), false)
+	node.setChildCryptoHash(0, []byte("crypto-hash-for-test-0"))
+	node.setChildCryptoHash(15, []byte("crypto-hash-for-test-15"))
+	testTrieNodeMarshalUnmarshal(node, t)
+}
+
+func TestTrieNode_MergeAttributes(t *testing.T) {
+	// The updated node's own value and child hashes must win; only attributes
+	// missing from the updated node are pulled in from the existing node.
+	updatedNode := newTrieNode(newTrieKey("chaincodeID", "key"), []byte("newValue!"), true)
+	updatedNode.setChildCryptoHash(0, []byte("crypto-hash-for-test-0"))
+	updatedNode.setChildCryptoHash(5, []byte("crypto-hash-for-test-5"))
+
+	existingNode := newTrieNode(newTrieKey("chaincodeID", "key"), []byte("existingValue"), false)
+	existingNode.setChildCryptoHash(5, []byte("crypto-hash-for-test-5-existing"))
+	existingNode.setChildCryptoHash(10, []byte("crypto-hash-for-test-10-existing"))
+
+	updatedNode.mergeMissingAttributesFrom(existingNode)
+	testutil.AssertEquals(t, updatedNode.value, []byte("newValue!"))
+	testutil.AssertEquals(t, updatedNode.childrenCryptoHashes[0], []byte("crypto-hash-for-test-0"))
+	testutil.AssertEquals(t, updatedNode.childrenCryptoHashes[5], []byte("crypto-hash-for-test-5"))
+	testutil.AssertEquals(t, updatedNode.childrenCryptoHashes[10], []byte("crypto-hash-for-test-10-existing"))
+}
+
+func TestTrieNode_ComputeCryptoHash_NoValue_NoChild(t *testing.T) {
+	// A node with neither value nor children hashes to nil.
+	node := newTrieNode(newTrieKey("chaincodeID", "key"), nil, false)
+	testutil.AssertEquals(t, node.computeCryptoHash(), nil)
+}
+
+func TestTrieNode_ComputeCryptoHash_NoValue_SingleChild(t *testing.T) {
+	// With no value and exactly one child, the node's hash is the child's hash.
+	childHash := []byte("childCryptoHash-0")
+	node := newTrieNode(newTrieKey("chaincodeID", "key"), nil, false)
+	node.setChildCryptoHash(0, childHash)
+	testutil.AssertEquals(t, node.computeCryptoHash(), childHash)
+}
+
+func TestTrieNode_ComputeCryptoHash_NoValue_ManyChildren(t *testing.T) {
+	// With several sparse children and no value, the hash covers every child hash.
+	key := newTrieKey("chaincodeID", "key")
+	hashOfChild0 := []byte("childCryptoHash-0")
+	hashOfChild5 := []byte("childCryptoHash-5")
+	hashOfChild15 := []byte("childCryptoHash-15")
+
+	node := newTrieNode(key, nil, false)
+	node.setChildCryptoHash(0, hashOfChild0)
+	node.setChildCryptoHash(5, hashOfChild5)
+	node.setChildCryptoHash(15, hashOfChild15)
+	computedHash := node.computeCryptoHash()
+	expectedHash := expectedCryptoHashForTest(nil, nil, hashOfChild0, hashOfChild5, hashOfChild15)
+	testutil.AssertEquals(t, computedHash, expectedHash)
+}
+
+func TestTrieNode_ComputeCryptoHash_WithValue_NoChild(t *testing.T) {
+	// A childless node's hash covers its key and value only.
+	key := newTrieKey("chaincodeID", "key")
+	nodeValue := []byte("testValue")
+
+	node := newTrieNode(key, nodeValue, false)
+	testutil.AssertEquals(t, node.computeCryptoHash(), expectedCryptoHashForTest(key, nodeValue))
+}
+
+func TestTrieNode_ComputeCryptoHash_WithValue_SingleChild(t *testing.T) {
+	// A node with a value and a single child hashes over key, value, and child hash.
+	key := newTrieKey("chaincodeID", "key")
+	nodeValue := []byte("testValue")
+	childHash := []byte("childCryptoHash-0")
+
+	node := newTrieNode(key, nodeValue, false)
+	node.setChildCryptoHash(0, childHash)
+	computedHash := node.computeCryptoHash()
+	testutil.AssertEquals(t, computedHash, expectedCryptoHashForTest(key, nodeValue, childHash))
+}
+
+func TestTrieNode_ComputeCryptoHash_WithValue_ManyChildren(t *testing.T) {
+	// A node with a value and several sparse children hashes over all of them.
+	key := newTrieKey("chaincodeID", "key")
+	nodeValue := []byte("testValue")
+	hashOfChild0 := []byte("childCryptoHash-0")
+	hashOfChild5 := []byte("childCryptoHash-5")
+	hashOfChild15 := []byte("childCryptoHash-15")
+
+	node := newTrieNode(key, nodeValue, false)
+	node.setChildCryptoHash(0, hashOfChild0)
+	node.setChildCryptoHash(5, hashOfChild5)
+	node.setChildCryptoHash(15, hashOfChild15)
+	computedHash := node.computeCryptoHash()
+
+	expectedHash := expectedCryptoHashForTest(key, nodeValue, hashOfChild0, hashOfChild5, hashOfChild15)
+	testutil.AssertEquals(t, computedHash, expectedHash)
+}
+
+// testTrieNodeMarshalUnmarshal serializes trieNode, deserializes the result,
+// and asserts the round-tripped node matches the original field by field.
+func testTrieNodeMarshalUnmarshal(trieNode *trieNode, t *testing.T) {
+	trieNodeTestWrapper := &trieNodeTestWrapper{trieNode, t}
+	serializedContent := trieNodeTestWrapper.marshal()
+	trieNodeFromUnmarshal := trieNodeTestWrapper.unmarshal(trieNode.trieKey, serializedContent)
+	testutil.AssertEquals(t, trieNodeFromUnmarshal.trieKey, trieNode.trieKey)
+	testutil.AssertEquals(t, trieNodeFromUnmarshal.value, trieNode.value)
+	testutil.AssertEquals(t, trieNodeFromUnmarshal.childrenCryptoHashes, trieNode.childrenCryptoHashes)
+	testutil.AssertEquals(t, trieNodeFromUnmarshal.getNumChildren(), trieNode.getNumChildren())
+}
diff --git a/core/ledger/test.yaml b/core/ledger/test.yaml
new file mode 100644
index 00000000000..1e53fdd7225
--- /dev/null
+++ b/core/ledger/test.yaml
@@ -0,0 +1,17 @@
+###############################################################################
+#
+# Peer section
+#
+###############################################################################
+peer:
+ # Path on the file system where peer will store data
+ fileSystemPath: /var/hyperledger/test/ledger_test
+
+ledger:
+
+ state:
+
+ # Control the number of state deltas that are maintained. This takes additional
+ # disk space, but allows the state to be rolled backwards and forwards
+ # without the need to replay transactions.
+ deltaHistorySize: 500
diff --git a/core/ledger/test/ledger_suite_test.go b/core/ledger/test/ledger_suite_test.go
new file mode 100644
index 00000000000..98859a1b2e5
--- /dev/null
+++ b/core/ledger/test/ledger_suite_test.go
@@ -0,0 +1,68 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package ledger_test
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/ledger"
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+var testDBWrapper *db.TestDBWrapper
+
+func SetupTestConfig() {
+ viper.AddConfigPath(".")
+ viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
+ viper.AutomaticEnv()
+ viper.SetDefault("peer.ledger.test.loadYAML", true)
+ loadYAML := viper.GetBool("peer.ledger.test.loadYAML")
+ if loadYAML {
+ viper.SetConfigName("test")
+ err := viper.ReadInConfig()
+ if err != nil { // Handle errors reading the config file
+ panic(fmt.Errorf("Fatal error config file: %s \n", err))
+ }
+ }
+ var formatter = logging.MustStringFormatter(
+ `%{color}%{time:15:04:05.000} [%{module}] %{shortfunc} [%{shortfile}] -> %{level:.4s} %{id:03x}%{color:reset} %{message}`,
+ )
+ logging.SetFormatter(formatter)
+}
+
+// InitSpec lazily creates the shared test DB wrapper, resets the database,
+// and returns a fresh ledger instance for a spec to use.
+func InitSpec() *ledger.Ledger {
+	if testDBWrapper == nil {
+		testDBWrapper = db.NewTestDBWrapper()
+	}
+	testDBWrapper.CreateFreshDBGinkgo()
+	freshLedger, err := ledger.GetNewLedger()
+	if err != nil {
+		Fail("failed to get a fresh ledger")
+	}
+	return freshLedger
+}
+
+// TestLedger is the go-test entry point that hands control to Ginkgo.
+func TestLedger(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Ledger Suite")
+}
diff --git a/core/ledger/test/ledger_test.go b/core/ledger/test/ledger_test.go
new file mode 100644
index 00000000000..331f3f3edfb
--- /dev/null
+++ b/core/ledger/test/ledger_test.go
@@ -0,0 +1,1117 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package ledger_test
+
+import (
+ "bytes"
+ "strconv"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/hyperledger/fabric/core/ledger"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/util"
+ "github.com/hyperledger/fabric/protos"
+)
+
+// appendAll concatenates the given byte slices into one new slice.
+func appendAll(content ...[]byte) []byte {
+	// Start from an empty (non-nil) slice so zero arguments yield []byte{}.
+	combined := []byte{}
+	for _, chunk := range content {
+		combined = append(combined, chunk...)
+	}
+	return combined
+}
+
+var _ = Describe("Ledger", func() {
+ var ledgerPtr *ledger.Ledger
+
+ SetupTestConfig()
+
+ Context("Ledger with preexisting uncommitted state", func() {
+
+ BeforeEach(func() {
+ ledgerPtr = InitSpec()
+
+ Expect(ledgerPtr.BeginTxBatch(1)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode3", "key3", []byte("value3"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid", true)
+ })
+
+ It("should return uncommitted state from memory", func() {
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", false)
+ Expect(state).To(Equal([]byte("value1")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", false)
+ Expect(state).To(Equal([]byte("value2")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", false)
+ Expect(state).To(Equal([]byte("value3")))
+ })
+ It("should not return committed state", func() {
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(BeNil())
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(BeNil())
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(BeNil())
+ })
+ It("should successfully rollback the batch", func() {
+ Expect(ledgerPtr.RollbackTxBatch(1)).To(BeNil())
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", false)
+ Expect(state).To(BeNil())
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", false)
+ Expect(state).To(BeNil())
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", false)
+ Expect(state).To(BeNil())
+ state, _ = ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(BeNil())
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(BeNil())
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(BeNil())
+ })
+ It("should commit the batch with the correct ID", func() {
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(1, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", false)
+ Expect(state).To(Equal([]byte("value1")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", false)
+ Expect(state).To(Equal([]byte("value2")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", false)
+ Expect(state).To(Equal([]byte("value3")))
+ state, _ = ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(Equal([]byte("value1")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(Equal([]byte("value2")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(Equal([]byte("value3")))
+ })
+ It("should not commit batch with an incorrect ID", func() {
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(2, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).ToNot(BeNil())
+ })
+ It("should get TX Batch Preview info and commit the batch and validate they are equal", func() {
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ previewBlockInfo, err := ledgerPtr.GetTXBatchPreviewBlockInfo(1, []*protos.Transaction{tx}, []byte("proof"))
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(1, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ commitedBlockInfo, err := ledgerPtr.GetBlockchainInfo()
+ Expect(err).To(BeNil())
+ Expect(previewBlockInfo).To(Equal(commitedBlockInfo))
+ })
+ It("can get a transaction by it's UUID", func() {
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(1, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+
+ ledgerTransaction, err := ledgerPtr.GetTransactionByUUID(uuid)
+ Expect(err).To(BeNil())
+ Expect(tx).To(Equal(ledgerTransaction))
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(Equal([]byte("value1")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(Equal([]byte("value2")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(Equal([]byte("value3")))
+ })
+ It("rollsback the batch and compares values for TempStateHash", func() {
+ var hash0, hash1 []byte
+ var err error
+ Expect(ledgerPtr.RollbackTxBatch(1)).To(BeNil())
+ hash0, err = ledgerPtr.GetTempStateHash()
+ Expect(err).To(BeNil())
+ Expect(ledgerPtr.BeginTxBatch(2)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode3", "key3", []byte("value3"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid", true)
+ hash1, err = ledgerPtr.GetTempStateHash()
+ Expect(err).To(BeNil())
+ Expect(hash0).ToNot(Equal(hash1))
+ Expect(ledgerPtr.RollbackTxBatch(2)).To(BeNil())
+ hash1, err = ledgerPtr.GetTempStateHash()
+ Expect(err).To(BeNil())
+ Expect(hash0).To(Equal(hash1))
+ })
+ It("commits and validates a batch with a bad transaction result", func() {
+ uuid := util.GenerateUUID()
+ transactionResult := &protos.TransactionResult{Uuid: uuid, ErrorCode: 500, Error: "bad"}
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ ledgerPtr.CommitTxBatch(1, []*protos.Transaction{tx}, []*protos.TransactionResult{transactionResult}, []byte("proof"))
+
+ block, err := ledgerPtr.GetBlockByNumber(0)
+ Expect(err).To(BeNil())
+ nonHashData := block.GetNonHashData()
+ Expect(nonHashData).ToNot(BeNil())
+ })
+ })
+
+ Context("Ledger with committed state", func() {
+
+ BeforeEach(func() {
+ ledgerPtr = InitSpec()
+
+ Expect(ledgerPtr.BeginTxBatch(1)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode3", "key3", []byte("value3"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid", true)
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(1, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ })
+ It("creates and confirms the contents of a snapshot", func() {
+ Expect(ledgerPtr.BeginTxBatch(2)).To(BeNil())
+ snapshot, err := ledgerPtr.GetStateSnapshot()
+ Expect(err).To(BeNil())
+ defer snapshot.Release()
+ ledgerPtr.TxBegin("txUuid")
+ Expect(ledgerPtr.DeleteState("chaincode1", "key1")).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode4", "key4", []byte("value4"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode5", "key5", []byte("value5"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode6", "key6", []byte("value6"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid", true)
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(2, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+
+ var count = 0
+ for snapshot.Next() {
+ //_, _ := snapshot.GetRawKeyValue()
+ //t.Logf("Key %v, Val %v", k, v)
+ count++
+ }
+ Expect(count).To(Equal(3))
+ Expect(snapshot.GetBlockNumber()).To(Equal(uint64(0)))
+ })
+ It("deletes all state, keys and values from ledger without error", func() {
+ Expect(ledgerPtr.DeleteALLStateKeysAndValues()).To(BeNil())
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(BeNil())
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(BeNil())
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(BeNil())
+ // Test that we can now store new stuff in the state
+ Expect(ledgerPtr.BeginTxBatch(2)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode3", "key3", []byte("value3"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid1", true)
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(2, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ state, _ = ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(Equal([]byte("value1")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(Equal([]byte("value2")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(Equal([]byte("value3")))
+ })
+ It("creates and confirms the contents of a snapshot", func() {
+ Expect(ledgerPtr.BeginTxBatch(2)).To(BeNil())
+ snapshot, err := ledgerPtr.GetStateSnapshot()
+ Expect(err).To(BeNil())
+ defer snapshot.Release()
+ ledgerPtr.TxBegin("txUuid")
+ Expect(ledgerPtr.DeleteState("chaincode1", "key1")).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode4", "key4", []byte("value4"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode5", "key5", []byte("value5"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode6", "key6", []byte("value6"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid", true)
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(2, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+
+ var count = 0
+ for snapshot.Next() {
+ //_, _ := snapshot.GetRawKeyValue()
+ //t.Logf("Key %v, Val %v", k, v)
+ count++
+ }
+ Expect(count).To(Equal(3))
+ Expect(snapshot.GetBlockNumber()).To(Equal(uint64(0)))
+ })
+ })
+
+ Describe("Ledger GetTempStateHashWithTxDeltaStateHashes", func() {
+ ledgerPtr := InitSpec()
+
+ It("creates, populates and finishes a transaction", func() {
+ Expect(ledgerPtr.BeginTxBatch(1)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid1", true)
+ })
+ It("creates, populates and finishes a transaction", func() {
+ ledgerPtr.TxBegin("txUuid2")
+ Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid2", true)
+ })
+ It("creates, but does not populate and finishes a transaction", func() {
+ ledgerPtr.TxBegin("txUuid3")
+ ledgerPtr.TxFinished("txUuid3", true)
+ })
+ It("creates, populates and finishes a transaction", func() {
+ ledgerPtr.TxBegin("txUuid4")
+ Expect(ledgerPtr.SetState("chaincode4", "key4", []byte("value4"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid4", false)
+ })
+ It("should retrieve the delta state hash array containing expected values", func() {
+ _, txDeltaHashes, err := ledgerPtr.GetTempStateHashWithTxDeltaStateHashes()
+ Expect(err).To(BeNil())
+ Expect(util.ComputeCryptoHash(appendAll([]byte("chaincode1key1value1")))).To(Equal(txDeltaHashes["txUuid1"]))
+ Expect(util.ComputeCryptoHash(appendAll([]byte("chaincode2key2value2")))).To(Equal(txDeltaHashes["txUuid2"]))
+ Expect(txDeltaHashes["txUuid3"]).To(BeNil())
+ _, ok := txDeltaHashes["txUuid4"]
+ Expect(ok).To(Equal(false))
+ })
+ It("should commit the batch", func() {
+ Expect(ledgerPtr.CommitTxBatch(1, []*protos.Transaction{}, nil, []byte("proof"))).To(BeNil())
+ })
+ It("creates, populates and finishes a transaction", func() {
+ Expect(ledgerPtr.BeginTxBatch(2)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid1", true)
+ })
+ It("should retrieve a delta state hash array of length 1", func() {
+ _, txDeltaHashes, err := ledgerPtr.GetTempStateHashWithTxDeltaStateHashes()
+ Expect(err).To(BeNil())
+ Expect(len(txDeltaHashes)).To(Equal(1))
+ })
+ })
+
+ Describe("Ledger PutRawBlock", func() {
+ ledgerPtr := InitSpec()
+
+ block := new(protos.Block)
+ block.PreviousBlockHash = []byte("foo")
+ block.StateHash = []byte("bar")
+ It("creates a raw block and puts it in the ledger without error", func() {
+ Expect(ledgerPtr.PutRawBlock(block, 4)).To(BeNil())
+ })
+ It("should return the same block that was stored", func() {
+ Expect(ledgerPtr.GetBlockByNumber(4)).To(Equal(block))
+ })
+ It("creates, populates and finishes a transaction", func() {
+ Expect(ledgerPtr.BeginTxBatch(1)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid", true)
+ })
+ It("should commit the batch", func() {
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(1, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ })
+ It("should have retrieved a block without error", func() {
+ previousHash, _ := block.GetHash()
+ newBlock, err := ledgerPtr.GetBlockByNumber(5)
+ Expect(err).To(BeNil())
+ Expect(newBlock.PreviousBlockHash).To(Equal(previousHash))
+ })
+ })
+
+ Describe("Ledger SetRawState", func() {
+ //var hash1, hash2, hash3 []byte
+ //var snapshot *state.StateSnapshot
+ var hash1, hash2, hash3 []byte
+ var err error
+ ledgerPtr := InitSpec()
+
+ It("creates, populates and finishes a batch", func() {
+ Expect(ledgerPtr.BeginTxBatch(1)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode3", "key3", []byte("value3"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid", true)
+ })
+ It("should commit the batch", func() {
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(1, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ })
+ It("should validate that the state is what was committed", func() {
+ // Ensure values are in the DB
+ val, err := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(bytes.Compare(val, []byte("value1"))).To(Equal(0))
+ Expect(err).To(BeNil())
+ val, err = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(bytes.Compare(val, []byte("value2"))).To(Equal(0))
+ Expect(err).To(BeNil())
+ val, err = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(bytes.Compare(val, []byte("value3"))).To(Equal(0))
+ Expect(err).To(BeNil())
+ })
+ It("should get state hash without error", func() {
+ hash1, err = ledgerPtr.GetTempStateHash()
+ Expect(err).To(BeNil())
+ })
+ It("should set raw state without error", func() {
+ snapshot, err := ledgerPtr.GetStateSnapshot()
+ Expect(err).To(BeNil())
+ defer snapshot.Release()
+ Expect(ledgerPtr.BeginTxBatch(2)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid2")
+ Expect(ledgerPtr.DeleteState("chaincode1", "key1")).To(BeNil())
+ Expect(ledgerPtr.DeleteState("chaincode2", "key2")).To(BeNil())
+ Expect(ledgerPtr.DeleteState("chaincode3", "key3")).To(BeNil())
+ ledgerPtr.TxFinished("txUuid2", true)
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(2, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(BeNil())
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(BeNil())
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(BeNil())
+ hash2, err = ledgerPtr.GetTempStateHash()
+ Expect(err).To(BeNil())
+ Expect(bytes.Compare(hash1, hash2)).ToNot(Equal(0))
+ // put key/values from the snapshot back in the DB
+ //var keys, values [][]byte
+ delta := statemgmt.NewStateDelta()
+ for i := 0; snapshot.Next(); i++ {
+ k, v := snapshot.GetRawKeyValue()
+ cID, keyID := statemgmt.DecodeCompositeKey(k)
+ delta.Set(cID, keyID, v, nil)
+ }
+ ledgerPtr.ApplyStateDelta(1, delta)
+ ledgerPtr.CommitStateDelta(1)
+ })
+ It("should return restored state", func() {
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(Equal([]byte("value1")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(Equal([]byte("value2")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(Equal([]byte("value3")))
+ })
+ It("should get state hash without error", func() {
+ hash3, err = ledgerPtr.GetTempStateHash()
+ Expect(err).To(BeNil())
+ })
+ It("should match the current hash with the originally returned hash", func() {
+ Expect(bytes.Compare(hash1, hash3)).To(Equal(0))
+ })
+ })
+
+ Describe("Ledger VerifyChain", func() {
+ ledgerPtr := InitSpec()
+
+ // Build a big blockchain
+ It("creates, populates, finishes and commits a large blockchain", func() {
+ for i := 0; i < 100; i++ {
+ Expect(ledgerPtr.BeginTxBatch(i)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid" + strconv.Itoa(i))
+ Expect(ledgerPtr.SetState("chaincode"+strconv.Itoa(i), "key"+strconv.Itoa(i), []byte("value"+strconv.Itoa(i)))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid"+strconv.Itoa(i), true)
+
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(i, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ }
+ })
+ It("verifies the blockchain", func() {
+ // Verify the chain
+ for lowBlock := uint64(0); lowBlock < ledgerPtr.GetBlockchainSize()-1; lowBlock++ {
+ Expect(ledgerPtr.VerifyChain(ledgerPtr.GetBlockchainSize()-1, lowBlock)).To(Equal(lowBlock))
+ }
+ for highBlock := ledgerPtr.GetBlockchainSize() - 1; highBlock > 0; highBlock-- {
+ Expect(ledgerPtr.VerifyChain(highBlock, 0)).To(Equal(uint64(0)))
+ }
+ })
+ It("adds bad blocks to the blockchain", func() {
+ // Add bad blocks and test
+ badBlock := protos.NewBlock(nil, nil)
+ badBlock.PreviousBlockHash = []byte("evil")
+ for i := uint64(0); i < ledgerPtr.GetBlockchainSize(); i++ {
+ goodBlock, _ := ledgerPtr.GetBlockByNumber(i)
+ ledgerPtr.PutRawBlock(badBlock, i)
+ for lowBlock := uint64(0); lowBlock < ledgerPtr.GetBlockchainSize()-1; lowBlock++ {
+ if i == ledgerPtr.GetBlockchainSize()-1 {
+ Expect(ledgerPtr.VerifyChain(ledgerPtr.GetBlockchainSize()-1, lowBlock)).To(Equal(uint64(i)))
+ } else if i >= lowBlock {
+ Expect(ledgerPtr.VerifyChain(ledgerPtr.GetBlockchainSize()-1, lowBlock)).To(Equal(uint64(i + 1)))
+ } else {
+ Expect(ledgerPtr.VerifyChain(ledgerPtr.GetBlockchainSize()-1, lowBlock)).To(Equal(lowBlock))
+ }
+ }
+ for highBlock := ledgerPtr.GetBlockchainSize() - 1; highBlock > 0; highBlock-- {
+ if i == highBlock {
+ Expect(ledgerPtr.VerifyChain(highBlock, 0)).To(Equal(uint64(i)))
+ } else if i < highBlock {
+ Expect(ledgerPtr.VerifyChain(highBlock, 0)).To(Equal(uint64(i + 1)))
+ } else {
+ Expect(ledgerPtr.VerifyChain(highBlock, 0)).To(Equal(uint64(0)))
+ }
+ }
+ Expect(ledgerPtr.PutRawBlock(goodBlock, i)).To(BeNil())
+ }
+ })
+ // Test edge cases
+ It("tests some edge cases", func() {
+ _, err := ledgerPtr.VerifyChain(2, 10)
+ Expect(err).To(Equal(ledger.ErrOutOfBounds))
+ _, err = ledgerPtr.VerifyChain(0, 100)
+ Expect(err).To(Equal(ledger.ErrOutOfBounds))
+ })
+ })
+
+ Describe("Ledger BlockNumberOutOfBoundsError", func() {
+ ledgerPtr := InitSpec()
+
+ // Build a big blockchain
+ It("creates, populates, finishes and commits a large blockchain", func() {
+ for i := 0; i < 10; i++ {
+ Expect(ledgerPtr.BeginTxBatch(i)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid" + strconv.Itoa(i))
+ Expect(ledgerPtr.SetState("chaincode"+strconv.Itoa(i), "key"+strconv.Itoa(i), []byte("value"+strconv.Itoa(i)))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid"+strconv.Itoa(i), true)
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(i, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ }
+ })
+ It("forces some ErrOutOfBounds conditions", func() {
+ ledgerPtr.GetBlockByNumber(9)
+ _, err := ledgerPtr.GetBlockByNumber(10)
+ Expect(err).To(Equal(ledger.ErrOutOfBounds))
+
+ ledgerPtr.GetStateDelta(9)
+ _, err = ledgerPtr.GetStateDelta(10)
+ Expect(err).To(Equal(ledger.ErrOutOfBounds))
+ })
+ })
+
+ Describe("Ledger RollBackwardsAndForwards", func() {
+ ledgerPtr := InitSpec()
+
+ // Block 0
+ It("creates, populates, finishes, commits and validates a batch", func() {
+ Expect(ledgerPtr.BeginTxBatch(0)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1A"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2A"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode3", "key3", []byte("value3A"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid1", true)
+
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(0, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(Equal([]byte("value1A")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(Equal([]byte("value2A")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(Equal([]byte("value3A")))
+ })
+ // Block 1
+ It("creates, populates and finishes a batch", func() {
+ Expect(ledgerPtr.BeginTxBatch(1)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1B"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2B"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode3", "key3", []byte("value3B"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid1", true)
+ })
+ It("should commit the batch", func() {
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(1, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ })
+ It("should return committed state from batch 2", func() {
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(Equal([]byte("value1B")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(Equal([]byte("value2B")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(Equal([]byte("value3B")))
+ })
+ // Block 2
+ It("creates, populates and finishes a batch", func() {
+ Expect(ledgerPtr.BeginTxBatch(2)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1C"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2C"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode3", "key3", []byte("value3C"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode4", "key4", []byte("value4C"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid1", true)
+ })
+ It("should commit the batch", func() {
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(2, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ })
+ It("should return committed state from batch 3", func() {
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(Equal([]byte("value1C")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(Equal([]byte("value2C")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(Equal([]byte("value3C")))
+ state, _ = ledgerPtr.GetState("chaincode4", "key4", true)
+ Expect(state).To(Equal([]byte("value4C")))
+ })
+ // Roll backwards once
+ It("rolls backwards once without error", func() {
+ delta2, err := ledgerPtr.GetStateDelta(2)
+ Expect(err).To(BeNil())
+ delta2.RollBackwards = true
+ err = ledgerPtr.ApplyStateDelta(1, delta2)
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitStateDelta(1)
+ Expect(err).To(BeNil())
+ })
+ PIt("should return committed state from batch 2 after rollback", func() {
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(Equal([]byte("value1B")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(Equal([]byte("value2B")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(Equal([]byte("value3B")))
+ state, _ = ledgerPtr.GetState("chaincode4", "key4", true)
+ Expect(state).To(BeNil())
+ })
+ // Now roll forwards once
+ It("rolls forwards once without error", func() {
+ delta2, err := ledgerPtr.GetStateDelta(2)
+ Expect(err).To(BeNil())
+ delta2.RollBackwards = false
+ err = ledgerPtr.ApplyStateDelta(2, delta2)
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitStateDelta(2)
+ Expect(err).To(BeNil())
+ })
+ It("should return committed state from batch 3 after roll forward", func() {
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(Equal([]byte("value1C")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(Equal([]byte("value2C")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(Equal([]byte("value3C")))
+ state, _ = ledgerPtr.GetState("chaincode4", "key4", true)
+ Expect(state).To(Equal([]byte("value4C")))
+ })
+ It("rolls backwards twice without error", func() {
+ delta2, err := ledgerPtr.GetStateDelta(2)
+ Expect(err).To(BeNil())
+ delta2.RollBackwards = true
+ delta1, err := ledgerPtr.GetStateDelta(1)
+ Expect(err).To(BeNil())
+ err = ledgerPtr.ApplyStateDelta(3, delta2)
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitStateDelta(3)
+ Expect(err).To(BeNil())
+ delta1.RollBackwards = true
+ err = ledgerPtr.ApplyStateDelta(4, delta1)
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitStateDelta(4)
+ Expect(err).To(BeNil())
+ })
+ It("should return committed state from batch 1 after rollback", func() {
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(Equal([]byte("value1A")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(Equal([]byte("value2A")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(Equal([]byte("value3A")))
+ })
+
+ // Now roll forwards twice
+ It("rolls forwards twice without error", func() {
+ delta2, err := ledgerPtr.GetStateDelta(2)
+ Expect(err).To(BeNil())
+ delta2.RollBackwards = false
+ delta1, err := ledgerPtr.GetStateDelta(1)
+ Expect(err).To(BeNil())
+ delta1.RollBackwards = false
+
+ err = ledgerPtr.ApplyStateDelta(5, delta1)
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitStateDelta(5)
+ Expect(err).To(BeNil())
+ delta1.RollBackwards = false
+ err = ledgerPtr.ApplyStateDelta(6, delta2)
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitStateDelta(6)
+ Expect(err).To(BeNil())
+ })
+ It("should return committed state from batch 3 after roll forward", func() {
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(Equal([]byte("value1C")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(Equal([]byte("value2C")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(Equal([]byte("value3C")))
+ state, _ = ledgerPtr.GetState("chaincode4", "key4", true)
+ Expect(state).To(Equal([]byte("value4C")))
+ })
+ })
+
+ Describe("Ledger InvalidOrderDelta", func() {
+ ledgerPtr := InitSpec()
+ var delta *statemgmt.StateDelta
+
+ // Block 0
+ It("creates, populates and finishes a batch", func() {
+ Expect(ledgerPtr.BeginTxBatch(0)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1A"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2A"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode3", "key3", []byte("value3A"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid1", true)
+ })
+ It("should commit the batch", func() {
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(0, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ })
+ It("should return committed state from batch 1", func() {
+
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(Equal([]byte("value1A")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(Equal([]byte("value2A")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(Equal([]byte("value3A")))
+ })
+ // Block 1
+ It("creates, populates and finishes a batch", func() {
+ Expect(ledgerPtr.BeginTxBatch(1)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1B"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2B"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode3", "key3", []byte("value3B"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid1", true)
+ })
+ It("should commit the batch", func() {
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(1, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ })
+ It("should return committed state", func() {
+ state, _ := ledgerPtr.GetState("chaincode1", "key1", true)
+ Expect(state).To(Equal([]byte("value1B")))
+ state, _ = ledgerPtr.GetState("chaincode2", "key2", true)
+ Expect(state).To(Equal([]byte("value2B")))
+ state, _ = ledgerPtr.GetState("chaincode3", "key3", true)
+ Expect(state).To(Equal([]byte("value3B")))
+ })
+ It("should return error trying to commit state delta", func() {
+ delta, _ = ledgerPtr.GetStateDelta(1)
+ Expect(ledgerPtr.CommitStateDelta(1)).ToNot(BeNil())
+ })
+ It("should return error trying to rollback batch", func() {
+ Expect(ledgerPtr.RollbackTxBatch(1)).ToNot(BeNil())
+ })
+ It("should return error trying to apply state delta", func() {
+ Expect(ledgerPtr.ApplyStateDelta(2, delta)).To(BeNil())
+ Expect(ledgerPtr.ApplyStateDelta(3, delta)).ToNot(BeNil())
+ })
+ It("should return error trying to commit state delta", func() {
+ Expect(ledgerPtr.CommitStateDelta(3)).ToNot(BeNil())
+ })
+ It("should return error trying to rollback state delta", func() {
+ Expect(ledgerPtr.RollbackStateDelta(3)).ToNot(BeNil())
+ })
+ })
+
+ Describe("Ledger ApplyDeltaHash", func() {
+ ledgerPtr := InitSpec()
+
+ // Block 0
+ It("creates, populates, finishes, commits and validates three batches", func() {
+ Expect(ledgerPtr.BeginTxBatch(0)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1A"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2A"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode3", "key3", []byte("value3A"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid1", true)
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(0, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+
+ // Block 1
+ Expect(ledgerPtr.BeginTxBatch(1)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1B"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2B"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode3", "key3", []byte("value3B"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid1", true)
+ uuid = util.GenerateUUID()
+ tx, err = protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(1, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+
+ // Block 2
+ Expect(ledgerPtr.BeginTxBatch(2)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ Expect(ledgerPtr.SetState("chaincode1", "key1", []byte("value1C"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode2", "key2", []byte("value2C"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode3", "key3", []byte("value3C"))).To(BeNil())
+ Expect(ledgerPtr.SetState("chaincode4", "key4", []byte("value4C"))).To(BeNil())
+ ledgerPtr.TxFinished("txUuid1", true)
+ uuid = util.GenerateUUID()
+ tx, err = protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(2, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ })
+ It("should roll backwards, then forwards and apply and commit a state delta", func() {
+ hash2, err := ledgerPtr.GetTempStateHash()
+ Expect(err).To(BeNil())
+
+ // Roll backwards once
+ delta2, err := ledgerPtr.GetStateDelta(2)
+ Expect(err).To(BeNil())
+ delta2.RollBackwards = true
+ err = ledgerPtr.ApplyStateDelta(1, delta2)
+ Expect(err).To(BeNil())
+
+ preHash1, err := ledgerPtr.GetTempStateHash()
+ Expect(err).To(BeNil())
+ Expect(preHash1).ToNot(Equal(hash2))
+
+ err = ledgerPtr.CommitStateDelta(1)
+ Expect(err).To(BeNil())
+ hash1, err := ledgerPtr.GetTempStateHash()
+ Expect(err).To(BeNil())
+ Expect(preHash1).To(Equal(hash1))
+ Expect(hash1).ToNot(Equal(hash2))
+
+ // Roll forwards once
+ delta2.RollBackwards = false
+ err = ledgerPtr.ApplyStateDelta(2, delta2)
+ Expect(err).To(BeNil())
+
+ preHash2, err := ledgerPtr.GetTempStateHash()
+ Expect(err).To(BeNil())
+ Expect(preHash2).To(Equal(hash2))
+
+ err = ledgerPtr.RollbackStateDelta(2)
+ Expect(err).To(BeNil())
+
+ preHash2, err = ledgerPtr.GetTempStateHash()
+ Expect(err).To(BeNil())
+ Expect(preHash2).To(Equal(hash1))
+
+ err = ledgerPtr.ApplyStateDelta(3, delta2)
+ Expect(err).To(BeNil())
+ preHash2, err = ledgerPtr.GetTempStateHash()
+ Expect(err).To(BeNil())
+ Expect(preHash2).To(Equal(hash2))
+
+ err = ledgerPtr.CommitStateDelta(3)
+ Expect(err).To(BeNil())
+
+ preHash2, err = ledgerPtr.GetTempStateHash()
+ Expect(err).To(BeNil())
+ Expect(preHash2).To(Equal(hash2))
+ })
+ })
+
+ Describe("Ledger RangeScanIterator", func() {
+ ledgerPtr := InitSpec()
+ AssertIteratorContains := func(itr statemgmt.RangeScanIterator, expected map[string][]byte) {
+ count := 0
+ actual := make(map[string][]byte)
+ for itr.Next() {
+ count++
+ k, v := itr.GetKeyValue()
+ actual[k] = v
+ }
+
+ Expect(count).To(Equal(len(expected)))
+ for k, v := range expected {
+ Expect(actual[k]).To(Equal(v))
+ }
+ }
+ ///////// Test with an empty Ledger //////////
+ //////////////////////////////////////////////
+ It("does a bunch of stuff", func() {
+ itr, _ := ledgerPtr.GetStateRangeScanIterator("chaincodeID2", "key2", "key5", false)
+ expected := map[string][]byte{}
+ AssertIteratorContains(itr, expected)
+ itr.Close()
+
+ itr, _ = ledgerPtr.GetStateRangeScanIterator("chaincodeID2", "key2", "key5", true)
+ expected = map[string][]byte{}
+ AssertIteratorContains(itr, expected)
+ itr.Close()
+
+ // Commit initial data to ledger
+ ledgerPtr.BeginTxBatch(0)
+ ledgerPtr.TxBegin("txUuid1")
+ ledgerPtr.SetState("chaincodeID1", "key1", []byte("value1"))
+
+ ledgerPtr.SetState("chaincodeID2", "key1", []byte("value1"))
+ ledgerPtr.SetState("chaincodeID2", "key2", []byte("value2"))
+ ledgerPtr.SetState("chaincodeID2", "key3", []byte("value3"))
+
+ ledgerPtr.SetState("chaincodeID3", "key1", []byte("value1"))
+
+ ledgerPtr.SetState("chaincodeID4", "key1", []byte("value1"))
+ ledgerPtr.SetState("chaincodeID4", "key2", []byte("value2"))
+ ledgerPtr.SetState("chaincodeID4", "key3", []byte("value3"))
+ ledgerPtr.SetState("chaincodeID4", "key4", []byte("value4"))
+ ledgerPtr.SetState("chaincodeID4", "key5", []byte("value5"))
+ ledgerPtr.SetState("chaincodeID4", "key6", []byte("value6"))
+ ledgerPtr.SetState("chaincodeID4", "key7", []byte("value7"))
+
+ ledgerPtr.SetState("chaincodeID5", "key1", []byte("value5"))
+ ledgerPtr.SetState("chaincodeID6", "key1", []byte("value6"))
+
+ ledgerPtr.TxFinished("txUuid1", true)
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(0, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+
+ // Add new keys and modify existing keys in on-going tx-batch
+ ledgerPtr.BeginTxBatch(1)
+ ledgerPtr.TxBegin("txUuid1")
+ ledgerPtr.SetState("chaincodeID4", "key2", []byte("value2_new"))
+ ledgerPtr.DeleteState("chaincodeID4", "key3")
+ ledgerPtr.SetState("chaincodeID4", "key8", []byte("value8_new"))
+
+ ///////////////////// Test with committed=true ///////////
+ //////////////////////////////////////////////////////////
+ // test range scan for chaincodeID4
+ itr, _ = ledgerPtr.GetStateRangeScanIterator("chaincodeID4", "key2", "key5", true)
+ expected = map[string][]byte{
+ "key2": []byte("value2"),
+ "key3": []byte("value3"),
+ "key4": []byte("value4"),
+ "key5": []byte("value5"),
+ }
+ AssertIteratorContains(itr, expected)
+ itr.Close()
+
+ // test with empty start-key
+ itr, _ = ledgerPtr.GetStateRangeScanIterator("chaincodeID4", "", "key5", true)
+ expected = map[string][]byte{
+ "key1": []byte("value1"),
+ "key2": []byte("value2"),
+ "key3": []byte("value3"),
+ "key4": []byte("value4"),
+ "key5": []byte("value5"),
+ }
+ AssertIteratorContains(itr, expected)
+ itr.Close()
+
+ // test with empty end-key
+ itr, _ = ledgerPtr.GetStateRangeScanIterator("chaincodeID4", "", "", true)
+ expected = map[string][]byte{
+ "key1": []byte("value1"),
+ "key2": []byte("value2"),
+ "key3": []byte("value3"),
+ "key4": []byte("value4"),
+ "key5": []byte("value5"),
+ "key6": []byte("value6"),
+ "key7": []byte("value7"),
+ }
+ AssertIteratorContains(itr, expected)
+ itr.Close()
+
+ ///////////////////// Test with committed=false ///////////
+ //////////////////////////////////////////////////////////
+ // test range scan for chaincodeID4
+ itr, _ = ledgerPtr.GetStateRangeScanIterator("chaincodeID4", "key2", "key5", false)
+ expected = map[string][]byte{
+ "key2": []byte("value2_new"),
+ "key4": []byte("value4"),
+ "key5": []byte("value5"),
+ }
+ AssertIteratorContains(itr, expected)
+ itr.Close()
+
+ // test with empty start-key
+ itr, _ = ledgerPtr.GetStateRangeScanIterator("chaincodeID4", "", "key5", false)
+ expected = map[string][]byte{
+ "key1": []byte("value1"),
+ "key2": []byte("value2_new"),
+ "key4": []byte("value4"),
+ "key5": []byte("value5"),
+ }
+ AssertIteratorContains(itr, expected)
+ itr.Close()
+
+ // test with empty end-key
+ itr, _ = ledgerPtr.GetStateRangeScanIterator("chaincodeID4", "", "", false)
+ expected = map[string][]byte{
+ "key1": []byte("value1"),
+ "key2": []byte("value2_new"),
+ "key4": []byte("value4"),
+ "key5": []byte("value5"),
+ "key6": []byte("value6"),
+ "key7": []byte("value7"),
+ "key8": []byte("value8_new"),
+ }
+ AssertIteratorContains(itr, expected)
+ itr.Close()
+ })
+ })
+
+ Describe("Ledger GetSetMultipleKeys", func() {
+ ledgerPtr := InitSpec()
+ It("creates, populates, finishes, commits and validates a batch", func() {
+ Expect(ledgerPtr.BeginTxBatch(1)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ ledgerPtr.SetStateMultipleKeys("chaincodeID", map[string][]byte{"key1": []byte("value1"), "key2": []byte("value2")})
+ ledgerPtr.TxFinished("txUuid1", true)
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(1, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ values, err := ledgerPtr.GetStateMultipleKeys("chaincodeID", []string{"key1", "key2"}, true)
+ Expect(err).To(BeNil())
+ Expect(values).To(Equal([][]byte{[]byte("value1"), []byte("value2")}))
+ })
+ })
+
+ Describe("Ledger CopyState", func() {
+ ledgerPtr := InitSpec()
+ It("creates, populates, finishes, commits and validates a batch", func() {
+ Expect(ledgerPtr.BeginTxBatch(0)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ ledgerPtr.SetStateMultipleKeys("chaincodeID", map[string][]byte{"key1": []byte("value1"), "key2": []byte("value2")})
+ ledgerPtr.TxFinished("txUuid1", true)
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(0, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ })
+ It("copies state without error and validates values are equal", func() {
+ Expect(ledgerPtr.BeginTxBatch(1)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ ledgerPtr.CopyState("chaincodeID", "chaincodeID2")
+ ledgerPtr.TxFinished("txUuid1", true)
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(1, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ values, err := ledgerPtr.GetStateMultipleKeys("chaincodeID2", []string{"key1", "key2"}, true)
+ Expect(err).To(BeNil())
+ Expect(values).To(Equal([][]byte{[]byte("value1"), []byte("value2")}))
+ })
+ })
+
+ Describe("Ledger EmptyArrayValue", func() {
+ ledgerPtr := InitSpec()
+ It("creates, populates, finishes, commits and validates a batch", func() {
+ Expect(ledgerPtr.BeginTxBatch(0)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ ledgerPtr.SetStateMultipleKeys("chaincodeID", map[string][]byte{"key1": []byte("value1"), "key2": []byte("value2")})
+ ledgerPtr.TxFinished("txUuid1", true)
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(0, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ value, err := ledgerPtr.GetState("chaincodeID", "key1", true)
+ Expect(err).To(BeNil())
+ Expect(value).ToNot(BeNil())
+ Expect(len(value)).To(Equal(6))
+ value, err = ledgerPtr.GetState("chaincodeID1", "non-existing-key", true)
+ var foo []byte
+ Expect(err).To(BeNil())
+ Expect(value).To(Equal(foo))
+ })
+ })
+
+ Describe("Ledger InvalidInput", func() {
+ ledgerPtr := InitSpec()
+ It("creates, populates, finishes, commits and validates a batch", func() {
+ Expect(ledgerPtr.BeginTxBatch(1)).To(BeNil())
+ ledgerPtr.TxBegin("txUuid1")
+ err := ledgerPtr.SetState("chaincodeID1", "key1", nil)
+ Expect(err).ToNot(BeNil())
+ ledgerErr, ok := err.(*ledger.Error)
+ Expect(ok && ledgerErr.Type() == ledger.ErrorTypeInvalidArgument).To(Equal(true))
+ err = ledgerPtr.SetState("chaincodeID1", "", []byte("value1"))
+ ledgerErr, ok = err.(*ledger.Error)
+ Expect(ok && ledgerErr.Type() == ledger.ErrorTypeInvalidArgument).To(Equal(true))
+ ledgerPtr.SetState("chaincodeID1", "key1", []byte("value1"))
+
+ ledgerPtr.TxFinished("txUuid1", true)
+ uuid := util.GenerateUUID()
+ tx, err := protos.NewTransaction(protos.ChaincodeID{Path: "testUrl"}, uuid, "anyfunction", []string{"param1, param2"})
+ Expect(err).To(BeNil())
+ err = ledgerPtr.CommitTxBatch(1, []*protos.Transaction{tx}, nil, []byte("proof"))
+ Expect(err).To(BeNil())
+ value, err := ledgerPtr.GetState("chaincodeID1", "key1", true)
+ Expect(err).To(BeNil())
+ Expect(value).To(Equal([]byte("value1")))
+ })
+ })
+})
diff --git a/core/ledger/test/test.yaml b/core/ledger/test/test.yaml
new file mode 100644
index 00000000000..1e53fdd7225
--- /dev/null
+++ b/core/ledger/test/test.yaml
@@ -0,0 +1,17 @@
+###############################################################################
+#
+# Peer section
+#
+###############################################################################
+peer:
+ # Path on the file system where peer will store data
+ fileSystemPath: /var/hyperledger/test/ledger_test
+
+ledger:
+
+ state:
+
+    # Control the number of state deltas that are maintained. This takes additional
+    # disk space, but allows the state to be rolled backwards and forwards
+ # without the need to replay transactions.
+ deltaHistorySize: 500
diff --git a/core/ledger/testutil/test_util.go b/core/ledger/testutil/test_util.go
new file mode 100644
index 00000000000..ee91c951d3a
--- /dev/null
+++ b/core/ledger/testutil/test_util.go
@@ -0,0 +1,217 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testutil
+
+import (
+ "crypto/rand"
+ "flag"
+ "fmt"
+ mathRand "math/rand"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/hyperledger/fabric/core/util"
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+type TestRandomNumberGenerator struct {
+ rand *mathRand.Rand
+ maxNumber int
+}
+
+func NewTestRandomNumberGenerator(maxNumber int) *TestRandomNumberGenerator {
+ return &TestRandomNumberGenerator{
+ mathRand.New(mathRand.NewSource(time.Now().UnixNano())),
+ maxNumber,
+ }
+}
+
+func (randNumGenerator *TestRandomNumberGenerator) Next() int {
+ return randNumGenerator.rand.Intn(randNumGenerator.maxNumber)
+}
+
+func SetupTestConfig() {
+ viper.AddConfigPath(".")
+ viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
+ viper.AutomaticEnv()
+ viper.SetDefault("peer.ledger.test.loadYAML", true)
+ loadYAML := viper.GetBool("peer.ledger.test.loadYAML")
+ if loadYAML {
+ viper.SetConfigName("test")
+ err := viper.ReadInConfig()
+ if err != nil { // Handle errors reading the config file
+ panic(fmt.Errorf("Fatal error config file: %s \n", err))
+ }
+ }
+ var formatter = logging.MustStringFormatter(
+ `%{color}%{time:15:04:05.000} [%{module}] %{shortfunc} [%{shortfile}] -> %{level:.4s} %{id:03x}%{color:reset} %{message}`,
+ )
+ logging.SetFormatter(formatter)
+}
+
+func SetLogLevel(level logging.Level, module string) {
+ logging.SetLevel(level, module)
+}
+
+func ParseTestParams() []string {
+ testParams := flag.String("testParams", "", "Test specific parameters")
+ flag.Parse()
+ regex, err := regexp.Compile(",(\\s+)?")
+ if err != nil {
+ panic(fmt.Errorf("err = %s\n", err))
+ }
+ paramsArray := regex.Split(*testParams, -1)
+ return paramsArray
+}
+
+func AssertNil(t testing.TB, value interface{}) {
+ if !isNil(value) {
+ t.Fatalf("Value not nil. value=[%#v]\n %s", value, getCallerInfo())
+ }
+}
+
+func AssertNotNil(t testing.TB, value interface{}) {
+ if isNil(value) {
+ t.Fatalf("Values is nil. %s", getCallerInfo())
+ }
+}
+
+func AssertSame(t testing.TB, actual interface{}, expected interface{}) {
+ t.Logf("%s: AssertSame [%#v] and [%#v]", getCallerInfo(), actual, expected)
+ if actual != expected {
+ t.Fatalf("Values actual=[%#v] and expected=[%#v] do not point to same object. %s", actual, expected, getCallerInfo())
+ }
+}
+
+func AssertEquals(t testing.TB, actual interface{}, expected interface{}) {
+ t.Logf("%s: AssertEquals [%#v] and [%#v]", getCallerInfo(), actual, expected)
+ if expected == nil && isNil(actual) {
+ return
+ }
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Values are not equal.\n Actual=[%#v], \n Expected=[%#v]\n %s", actual, expected, getCallerInfo())
+ }
+}
+
+func AssertNotEquals(t testing.TB, actual interface{}, expected interface{}) {
+ if reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Values are not supposed to be equal. Actual=[%#v], Expected=[%#v]\n %s", actual, expected, getCallerInfo())
+ }
+}
+
+func AssertError(t testing.TB, err error, message string) {
+ if err == nil {
+ t.Fatalf("%s\n %s", message, getCallerInfo())
+ }
+}
+
+func AssertNoError(t testing.TB, err error, message string) {
+ if err != nil {
+ t.Fatalf("%s - Error: %s\n %s", message, err, getCallerInfo())
+ }
+}
+
+func AssertContains(t testing.TB, slice interface{}, value interface{}) {
+ if reflect.TypeOf(slice).Kind() != reflect.Slice && reflect.TypeOf(slice).Kind() != reflect.Array {
+ t.Fatalf("Type of argument 'slice' is expected to be a slice/array, found =[%s]\n %s", reflect.TypeOf(slice), getCallerInfo())
+ }
+
+ if !contains(slice, value) {
+ t.Fatalf("Expected value [%s] not found in slice %s\n %s", value, slice, getCallerInfo())
+ }
+}
+
+func AssertContainsAll(t testing.TB, sliceActual interface{}, sliceExpected interface{}) {
+ if reflect.TypeOf(sliceActual).Kind() != reflect.Slice && reflect.TypeOf(sliceActual).Kind() != reflect.Array {
+ t.Fatalf("Type of argument 'sliceActual' is expected to be a slice/array, found =[%s]\n %s", reflect.TypeOf(sliceActual), getCallerInfo())
+ }
+
+ if reflect.TypeOf(sliceExpected).Kind() != reflect.Slice && reflect.TypeOf(sliceExpected).Kind() != reflect.Array {
+ t.Fatalf("Type of argument 'sliceExpected' is expected to be a slice/array, found =[%s]\n %s", reflect.TypeOf(sliceExpected), getCallerInfo())
+ }
+
+ array := reflect.ValueOf(sliceExpected)
+ for i := 0; i < array.Len(); i++ {
+ element := array.Index(i).Interface()
+ if !contains(sliceActual, element) {
+ t.Fatalf("Expected value [%s] not found in slice %s\n %s", element, sliceActual, getCallerInfo())
+ }
+ }
+}
+
+func AssertPanic(t testing.TB, msg string) {
+ x := recover()
+ if x == nil {
+ t.Fatal(msg)
+ } else {
+ t.Logf("A panic was caught successfully. Actual msg = %s", x)
+ }
+}
+
+func ComputeCryptoHash(content ...[]byte) []byte {
+ return util.ComputeCryptoHash(AppendAll(content...))
+}
+
+func AppendAll(content ...[]byte) []byte {
+ combinedContent := []byte{}
+ for _, b := range content {
+ combinedContent = append(combinedContent, b...)
+ }
+ return combinedContent
+}
+
+func GenerateUUID(t *testing.T) string {
+ uuid := util.GenerateUUID()
+ return uuid
+}
+
+func ConstructRandomBytes(t testing.TB, size int) []byte {
+ value := make([]byte, size)
+ _, err := rand.Read(value)
+ if err != nil {
+ t.Fatalf("Error while generating random bytes: %s", err)
+ }
+ return value
+}
+
+func contains(slice interface{}, value interface{}) bool {
+ array := reflect.ValueOf(slice)
+ for i := 0; i < array.Len(); i++ {
+ element := array.Index(i).Interface()
+ if value == element || reflect.DeepEqual(element, value) {
+ return true
+ }
+ }
+ return false
+}
+
+func isNil(in interface{}) bool {
+ return in == nil || reflect.ValueOf(in).IsNil() || (reflect.TypeOf(in).Kind() == reflect.Slice && reflect.ValueOf(in).Len() == 0)
+}
+
+func getCallerInfo() string {
+ _, file, line, ok := runtime.Caller(2)
+ if !ok {
+ return "Could not retrieve caller's info"
+ }
+ return fmt.Sprintf("CallerInfo = [%s:%d]", file, line)
+}
diff --git a/core/ledger/testutil/test_util_test.go b/core/ledger/testutil/test_util_test.go
new file mode 100644
index 00000000000..3cb0a13871f
--- /dev/null
+++ b/core/ledger/testutil/test_util_test.go
@@ -0,0 +1,23 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testutil
+
+import "testing"
+
+func TestSkipAll(t *testing.T) {
+ t.Skip(`No tests in this package for now - This package contains only utility functions that are meant to be used by other functional tests`)
+}
diff --git a/core/ledger/util/util.go b/core/ledger/util/util.go
new file mode 100644
index 00000000000..fdc768a7e2f
--- /dev/null
+++ b/core/ledger/util/util.go
@@ -0,0 +1,62 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// EncodeOrderPreservingVarUint64 returns a byte-representation for a uint64 number such that
+// all zero-bits starting bytes are trimmed in order to reduce the length of the array
+// For preserving the order in a default bytes-comparison, first byte contains the number of remaining bytes.
+// The presence of first byte also allows to use the returned bytes as part of other larger byte array such as a
+// composite-key representation in db
+func EncodeOrderPreservingVarUint64(number uint64) []byte {
+	// Fixed-width big-endian representation of the number.
+	buf := make([]byte, 8)
+	binary.BigEndian.PutUint64(buf, number)
+	// size = count of bytes left once leading zero bytes are dropped
+	// (0 when number == 0, since every byte is zero).
+	size := 0
+	for i := 0; i < len(buf); i++ {
+		if buf[i] != 0x00 {
+			size = 8 - i
+			break
+		}
+	}
+	sizeBytes := proto.EncodeVarint(uint64(size))
+	if len(sizeBytes) > 1 {
+		panic(fmt.Errorf("[]sizeBytes should not be more than one byte because the max number it needs to hold is 8. size=%d", size))
+	}
+	// Result = one size byte followed by only the significant bytes.
+	encoded := make([]byte, 1, size+1)
+	encoded[0] = sizeBytes[0]
+	return append(encoded, buf[8-size:]...)
+}
+
+// DecodeOrderPreservingVarUint64 decodes the number from the bytes obtained from method 'EncodeOrderPreservingVarUint64'.
+// Also, returns the number of bytes that are consumed in the process
+func DecodeOrderPreservingVarUint64(bytes []byte) (uint64, int) {
+	// First byte is a varint carrying the count of significant bytes (0..8).
+	s, _ := proto.DecodeVarint(bytes)
+	size := int(s)
+	// NOTE(review): `size` is not validated here; corrupted input with a
+	// first byte > 8 would panic on the slice operations below. Callers are
+	// expected to pass bytes produced by EncodeOrderPreservingVarUint64.
+	decodedBytes := make([]byte, 8)
+	// Right-align the significant bytes into an 8-byte big-endian buffer.
+	copy(decodedBytes[8-size:], bytes[1:size+1])
+	numBytesConsumed := size + 1
+	return binary.BigEndian.Uint64(decodedBytes), numBytesConsumed
+}
diff --git a/core/ledger/util/util_test.go b/core/ledger/util/util_test.go
new file mode 100644
index 00000000000..ff71b835699
--- /dev/null
+++ b/core/ledger/util/util_test.go
@@ -0,0 +1,54 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "bytes"
+ "testing"
+)
+
+// TestBasicEncodingDecoding checks that encoded values preserve numeric
+// ordering under a plain bytes comparison and that decoding round-trips the
+// original number.
+func TestBasicEncodingDecoding(t *testing.T) {
+	for i := 0; i < 10000; i++ {
+		value := EncodeOrderPreservingVarUint64(uint64(i))
+		nextValue := EncodeOrderPreservingVarUint64(uint64(i + 1))
+		if !(bytes.Compare(value, nextValue) < 0) {
+			// Args must pair each number with its own encoding to match the
+			// "%d ... %x ... %d ... %x" verb order (original passed
+			// i, i+1, value, nextValue, garbling the failure message).
+			t.Fatalf("A smaller integer should result into smaller bytes. Encoded bytes for [%d] is [%x] and for [%d] is [%x]",
+				i, value, i+1, nextValue)
+		}
+		decodedValue, _ := DecodeOrderPreservingVarUint64(value)
+		if decodedValue != uint64(i) {
+			t.Fatalf("Value not same after decoding. Original value = [%d], decode value = [%d]", i, decodedValue)
+		}
+	}
+}
+
+// TestDecodingAppendedValues verifies that a concatenated stream of
+// encodings can be decoded sequentially, advancing by the consumed-bytes
+// count returned from each decode.
+func TestDecodingAppendedValues(t *testing.T) {
+	appendedValues := []byte{}
+	for i := 0; i < 1000; i++ {
+		appendedValues = append(appendedValues, EncodeOrderPreservingVarUint64(uint64(i))...)
+	}
+
+	// `consumed` replaces the original local named `len`, which shadowed the
+	// builtin len() for the remainder of the function.
+	consumed := 0
+	value := uint64(0)
+	for i := 0; i < 1000; i++ {
+		appendedValues = appendedValues[consumed:]
+		value, consumed = DecodeOrderPreservingVarUint64(appendedValues)
+		if value != uint64(i) {
+			t.Fatalf("expected value = [%d], decode value = [%d]", i, value)
+		}
+	}
+}
diff --git a/core/logging.go b/core/logging.go
new file mode 100644
index 00000000000..e3116300d84
--- /dev/null
+++ b/core/logging.go
@@ -0,0 +1,100 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+ "os"
+ "strings"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+// A logger to log logging logs!
+var loggingLogger = logging.MustGetLogger("logging")
+
+// The default logging level, in force until LoggingInit() is called or in
+// case of configuration errors.
+var loggingDefaultLevel = logging.INFO
+
+// LoggingInit is a 'hook' called at the beginning of command processing to
+// parse logging-related options specified either on the command-line or in
+// config files. Command-line options take precedence over config file
+// options, and can also be passed as suitably-named environment variables. To
+// change module logging levels at runtime call `logging.SetLevel(level,
+// module)`. To debug this routine include logging=debug as the first
+// term of the logging specification.
+func LoggingInit(command string) {
+	// Parse the logging specification in the form
+	//   [<module>[,<module>...]=]<level>[:[<module>[,<module>...]=]<level>...]
+	defaultLevel := loggingDefaultLevel
+	var err error
+	// The command-line/env setting takes precedence over the per-command
+	// config-file entry.
+	spec := viper.GetString("logging_level")
+	if spec == "" {
+		spec = viper.GetString("logging." + command)
+	}
+	if spec != "" {
+		fields := strings.Split(spec, ":")
+		for _, field := range fields {
+			split := strings.Split(field, "=")
+			switch len(split) {
+			case 1:
+				// Bare level term: sets the default level.
+				defaultLevel, err = logging.LogLevel(field)
+				if err != nil {
+					loggingLogger.Warningf("Logging level '%s' not recognized, defaulting to %s : %s", field, loggingDefaultLevel, err)
+					defaultLevel = loggingDefaultLevel // NB - 'defaultLevel' was overwritten
+				}
+			case 2:
+				// <module>[,<module>...]=<level> term: per-module overrides.
+				if level, err := logging.LogLevel(split[1]); err != nil {
+					loggingLogger.Warningf("Invalid logging level in '%s' ignored", field)
+				} else if split[0] == "" {
+					loggingLogger.Warningf("Invalid logging override specification '%s' ignored - no module specified", field)
+				} else {
+					modules := strings.Split(split[0], ",")
+					for _, module := range modules {
+						logging.SetLevel(level, module)
+						loggingLogger.Debugf("Setting logging level for module '%s' to %s", module, level)
+					}
+				}
+			default:
+				// More than one '=' in a term is malformed.
+				loggingLogger.Warningf("Invalid logging override '%s' ignored; Missing ':' ?", field)
+			}
+		}
+	}
+	// Set the default logging level for all modules
+	logging.SetLevel(defaultLevel, "")
+	loggingLogger.Debugf("Setting default logging level to %s for command '%s'", defaultLevel, command)
+}
+
+// DefaultLoggingLevel returns the fallback value for loggers to use if
+// parsing fails (the package-level loggingDefaultLevel, logging.INFO).
+func DefaultLoggingLevel() logging.Level {
+	return loggingDefaultLevel
+}
+
+// Initiate 'leveled' logging to stderr.
+func init() {
+	// Record layout: timestamp, module, calling function, level and record
+	// id (the prefix is wrapped in color escapes), then the message.
+	logFormat := logging.MustStringFormatter(
+		"%{color}%{time:15:04:05.000} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}",
+	)
+	stderrBackend := logging.NewLogBackend(os.Stderr, "", 0)
+	formattedBackend := logging.NewBackendFormatter(stderrBackend, logFormat)
+	logging.SetBackend(formattedBackend).SetLevel(loggingDefaultLevel, "")
+}
diff --git a/core/logging_test.go b/core/logging_test.go
new file mode 100644
index 00000000000..716caa161e6
--- /dev/null
+++ b/core/logging_test.go
@@ -0,0 +1,157 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+ "testing"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+// TestLoggingLevelDefault: with no configuration at all, the package default
+// level must apply.
+func TestLoggingLevelDefault(t *testing.T) {
+	viper.Reset()
+
+	LoggingInit("")
+
+	assertDefaultLoggingLevel(t, DefaultLoggingLevel())
+}
+
+// TestLoggingLevelOtherThanDefault: a bare level in logging_level overrides
+// the package default.
+func TestLoggingLevelOtherThanDefault(t *testing.T) {
+	viper.Reset()
+	viper.Set("logging_level", "warning")
+
+	LoggingInit("")
+
+	assertDefaultLoggingLevel(t, logging.WARNING)
+}
+
+// TestLoggingLevelForSpecificModule: the "<module>=<level>" form.
+func TestLoggingLevelForSpecificModule(t *testing.T) {
+	viper.Reset()
+	viper.Set("logging_level", "core=info")
+
+	LoggingInit("")
+
+	assertModuleLoggingLevel(t, "core", logging.INFO)
+}
+
+// TestLoggingLeveltForMultipleModules: the colon-separated
+// "<module>=<level>:<module>=<level>" form.
+// NOTE(review): "Levelt" in the function name looks like a typo for "Level".
+func TestLoggingLeveltForMultipleModules(t *testing.T) {
+	viper.Reset()
+	viper.Set("logging_level", "core=warning:test=debug")
+
+	LoggingInit("")
+
+	assertModuleLoggingLevel(t, "core", logging.WARNING)
+	assertModuleLoggingLevel(t, "test", logging.DEBUG)
+}
+
+// TestLoggingLevelForMultipleModulesAtSameLevel: comma-separated module list
+// sharing one level, "<module>,<module>=<level>".
+func TestLoggingLevelForMultipleModulesAtSameLevel(t *testing.T) {
+	viper.Reset()
+	viper.Set("logging_level", "core,test=warning")
+
+	LoggingInit("")
+
+	assertModuleLoggingLevel(t, "core", logging.WARNING)
+	assertModuleLoggingLevel(t, "test", logging.WARNING)
+}
+
+// TestLoggingLevelForModuleWithDefault: a bare default level may precede a
+// per-module override.
+func TestLoggingLevelForModuleWithDefault(t *testing.T) {
+	viper.Reset()
+	viper.Set("logging_level", "info:test=warning")
+
+	LoggingInit("")
+
+	assertDefaultLoggingLevel(t, logging.INFO)
+	assertModuleLoggingLevel(t, "test", logging.WARNING)
+}
+
+// TestLoggingLevelForModuleWithDefaultAtEnd: the bare default may also come
+// after a per-module override — term order is not significant.
+func TestLoggingLevelForModuleWithDefaultAtEnd(t *testing.T) {
+	viper.Reset()
+	viper.Set("logging_level", "test=warning:info")
+
+	LoggingInit("")
+
+	assertDefaultLoggingLevel(t, logging.INFO)
+	assertModuleLoggingLevel(t, "test", logging.WARNING)
+}
+
+// TestLoggingLevelForSpecificCommand: "logging.<command>" config entry is
+// used when logging_level is unset.
+func TestLoggingLevelForSpecificCommand(t *testing.T) {
+	viper.Reset()
+	viper.Set("logging.node", "error")
+
+	LoggingInit("node")
+
+	assertDefaultLoggingLevel(t, logging.ERROR)
+}
+
+// TestLoggingLevelForUnknownCommandGoesToDefault: an unconfigured command
+// falls back to the package default.
+func TestLoggingLevelForUnknownCommandGoesToDefault(t *testing.T) {
+	viper.Reset()
+
+	LoggingInit("unknown command")
+
+	assertDefaultLoggingLevel(t, DefaultLoggingLevel())
+}
+
+// TestLoggingLevelInvalid: an unrecognized level name is ignored and the
+// package default applies.
+func TestLoggingLevelInvalid(t *testing.T) {
+	viper.Reset()
+	viper.Set("logging_level", "invalidlevel")
+
+	LoggingInit("")
+
+	assertDefaultLoggingLevel(t, DefaultLoggingLevel())
+}
+
+// TestLoggingLevelInvalidModules: an invalid level in a module override is
+// ignored without disturbing the default.
+func TestLoggingLevelInvalidModules(t *testing.T) {
+	viper.Reset()
+	viper.Set("logging_level", "core=invalid")
+
+	LoggingInit("")
+
+	assertDefaultLoggingLevel(t, DefaultLoggingLevel())
+}
+
+// TestLoggingLevelInvalidEmptyModule: "=level" with no module name is an
+// invalid override and is ignored.
+func TestLoggingLevelInvalidEmptyModule(t *testing.T) {
+	viper.Reset()
+	viper.Set("logging_level", "=warning")
+
+	LoggingInit("")
+
+	assertDefaultLoggingLevel(t, DefaultLoggingLevel())
+}
+
+// TestLoggingLevelInvalidModuleSyntax: more than one '=' in a term is
+// malformed and ignored.
+func TestLoggingLevelInvalidModuleSyntax(t *testing.T) {
+	viper.Reset()
+	viper.Set("logging_level", "type=warn=again")
+
+	LoggingInit("")
+
+	assertDefaultLoggingLevel(t, DefaultLoggingLevel())
+}
+
+// assertDefaultLoggingLevel asserts the level of the "" (default) module.
+func assertDefaultLoggingLevel(t *testing.T, expectedLevel logging.Level) {
+	assertModuleLoggingLevel(t, "", expectedLevel)
+}
+
+// assertModuleLoggingLevel asserts the currently configured level of the
+// named module.
+func assertModuleLoggingLevel(t *testing.T, module string, expectedLevel logging.Level) {
+	assertEquals(t, expectedLevel, logging.GetLevel(module))
+}
+
+// assertEquals fails (non-fatally) when expected != actual.
+// NOTE(review): interface `!=` panics for uncomparable dynamic types; fine
+// here since callers only pass logging.Level values.
+func assertEquals(t *testing.T, expected interface{}, actual interface{}) {
+	if expected != actual {
+		t.Errorf("Expected: %v, Got: %v", expected, actual)
+	}
+}
diff --git a/core/peer/config.go b/core/peer/config.go
new file mode 100644
index 00000000000..1c8689f6521
--- /dev/null
+++ b/core/peer/config.go
@@ -0,0 +1,182 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// The 'viper' package for configuration handling is very flexible, but has
+// been found to have extremely poor performance when configuration values are
+// accessed repeatedly. The function CacheConfiguration() defined here caches
+// all configuration values that are accessed frequently. These parameters
+// are now presented as function calls that access local configuration
+// variables. This seems to be the most robust way to represent these
+// parameters in the face of the numerous ways that configuration files are
+// loaded and used (e.g, normal usage vs. test cases).
+
+// The CacheConfiguration() function is allowed to be called globally to
+// ensure that the correct values are always cached; See for example how
+// certain parameters are forced in 'ChaincodeDevMode' in main.go.
+
+package peer
+
+import (
+ "fmt"
+ "net"
+
+ "github.com/spf13/viper"
+
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// Is the configuration cached?
+var configurationCached = false
+
+// Cached values and error values of the computed constants getLocalAddress(),
+// getValidatorStreamAddress(), and getPeerEndpoint()
+var localAddress string
+var localAddressError error
+var peerEndpoint *pb.PeerEndpoint
+var peerEndpointError error
+
+// Cached values of commonly used configuration constants.
+var syncStateSnapshotChannelSize int
+var syncStateDeltasChannelSize int
+var syncBlocksChannelSize int
+var validatorEnabled bool
+
+// Note: There is some kind of circular import issue that prevents us from
+// importing the "core" package into the "peer" package. The
+// 'peer.SecurityEnabled' bit is a duplicate of the 'core.SecurityEnabled'
+// bit.
+var securityEnabled bool
+
+// CacheConfiguration computes and caches commonly-used constants and
+// computed constants as package variables. Routines which were previously
+// global have been embedded here to preserve the original abstraction.
+func CacheConfiguration() (err error) {
+
+	// getLocalAddress returns the address:port the local peer is operating on. Affected by env:peer.addressAutoDetect
+	getLocalAddress := func() (peerAddress string, err error) {
+		if viper.GetBool("peer.addressAutoDetect") {
+			// Need to get the port from the peer.address setting, and append to the determined host IP
+			_, port, err := net.SplitHostPort(viper.GetString("peer.address"))
+			if err != nil {
+				err = fmt.Errorf("Error auto detecting Peer's address: %s", err)
+				return "", err
+			}
+			peerAddress = net.JoinHostPort(GetLocalIP(), port)
+			peerLogger.Infof("Auto detected peer address: %s", peerAddress)
+		} else {
+			peerAddress = viper.GetString("peer.address")
+		}
+		return
+	}
+
+	// getPeerEndpoint returns the PeerEndpoint for this Peer instance. Affected by env:peer.addressAutoDetect
+	getPeerEndpoint := func() (*pb.PeerEndpoint, error) {
+		var peerAddress string
+		var peerType pb.PeerEndpoint_Type
+		peerAddress, err := getLocalAddress()
+		if err != nil {
+			return nil, err
+		}
+		if viper.GetBool("peer.validator.enabled") {
+			peerType = pb.PeerEndpoint_VALIDATOR
+		} else {
+			peerType = pb.PeerEndpoint_NON_VALIDATOR
+		}
+		return &pb.PeerEndpoint{ID: &pb.PeerID{Name: viper.GetString("peer.id")}, Address: peerAddress, Type: peerType}, nil
+	}
+
+	// Compute and cache the derived values (and their errors) first ...
+	localAddress, localAddressError = getLocalAddress()
+	peerEndpoint, peerEndpointError = getPeerEndpoint()
+
+	// ... then the plain configuration constants.
+	syncStateSnapshotChannelSize = viper.GetInt("peer.sync.state.snapshot.channelSize")
+	syncStateDeltasChannelSize = viper.GetInt("peer.sync.state.deltas.channelSize")
+	syncBlocksChannelSize = viper.GetInt("peer.sync.blocks.channelSize")
+	validatorEnabled = viper.GetBool("peer.validator.enabled")
+
+	securityEnabled = viper.GetBool("security.enabled")
+
+	// The cached flag is set even when an error occurred above, so the lazy
+	// accessors do not recompute (and re-log) a failing configuration on
+	// every call; the error values themselves stay cached and are returned.
+	configurationCached = true
+
+	// Report address-resolution failures first, then endpoint failures.
+	if localAddressError != nil {
+		return localAddressError
+	} else if peerEndpointError != nil {
+		return peerEndpointError
+	}
+	return
+}
+
+// cacheConfiguration logs an error if error checks have failed.
+func cacheConfiguration() {
+	if err := CacheConfiguration(); err != nil {
+		peerLogger.Errorf("Execution continues after CacheConfiguration() failure : %s", err)
+	}
+}
+
+//Functional forms
+
+// GetLocalAddress returns the peer.address property
+func GetLocalAddress() (string, error) {
+	if !configurationCached {
+		cacheConfiguration()
+	}
+	return localAddress, localAddressError
+}
+
+// GetPeerEndpoint returns the cached PeerEndpoint for this peer (and any
+// error recorded while computing it).
+func GetPeerEndpoint() (*pb.PeerEndpoint, error) {
+	if !configurationCached {
+		cacheConfiguration()
+	}
+	return peerEndpoint, peerEndpointError
+}
+
+// SyncStateSnapshotChannelSize returns the peer.sync.state.snapshot.channelSize property
+func SyncStateSnapshotChannelSize() int {
+	if !configurationCached {
+		cacheConfiguration()
+	}
+	return syncStateSnapshotChannelSize
+}
+
+// SyncStateDeltasChannelSize returns the peer.sync.state.deltas.channelSize property
+func SyncStateDeltasChannelSize() int {
+	if !configurationCached {
+		cacheConfiguration()
+	}
+	return syncStateDeltasChannelSize
+}
+
+// SyncBlocksChannelSize returns the peer.sync.blocks.channelSize property
+func SyncBlocksChannelSize() int {
+	if !configurationCached {
+		cacheConfiguration()
+	}
+	return syncBlocksChannelSize
+}
+
+// ValidatorEnabled returns the peer.validator.enabled property
+func ValidatorEnabled() bool {
+	if !configurationCached {
+		cacheConfiguration()
+	}
+	return validatorEnabled
+}
+
+// SecurityEnabled returns the security.enabled property
+func SecurityEnabled() bool {
+	if !configurationCached {
+		cacheConfiguration()
+	}
+	return securityEnabled
+}
diff --git a/core/peer/errors.go b/core/peer/errors.go
new file mode 100644
index 00000000000..7c133dffb85
--- /dev/null
+++ b/core/peer/errors.go
@@ -0,0 +1,40 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package peer
+
+import (
+ "fmt"
+
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// DuplicateHandlerError returned if attempt to register same chaincodeID while a stream already exists.
+type DuplicateHandlerError struct {
+	To pb.PeerEndpoint // the remote endpoint for which a handler already exists
+}
+
+// Error implements the error interface, naming the duplicated endpoint.
+func (d *DuplicateHandlerError) Error() string {
+	return fmt.Sprintf("Duplicate Handler error: %s", d.To)
+}
+
+// newDuplicateHandlerError builds a DuplicateHandlerError from the handler's
+// destination endpoint; if the endpoint itself cannot be determined, the
+// lookup error is returned instead.
+func newDuplicateHandlerError(msgHandler MessageHandler) error {
+	to, err := msgHandler.To()
+	if err != nil {
+		return fmt.Errorf("Error creating Duplicate Handler error: %s", err)
+	}
+	return &DuplicateHandlerError{To: to}
+}
diff --git a/core/peer/handler.go b/core/peer/handler.go
new file mode 100644
index 00000000000..8ff95d9a007
--- /dev/null
+++ b/core/peer/handler.go
@@ -0,0 +1,699 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package peer
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/looplab/fsm"
+ "github.com/spf13/viper"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// Handler peer handler implementation.
+type Handler struct {
+	chatMutex sync.Mutex // serializes writes to ChatStream (see SendMessage)
+	ToPeerEndpoint *pb.PeerEndpoint // remote endpoint, learned from its DISC_HELLO
+	Coordinator MessageHandlerCoordinator
+	ChatStream ChatStream // the underlying bi-directional gRPC stream
+	doneChan chan struct{} // signals the discovery loop in start() to exit
+	FSM *fsm.FSM // message-type state machine: "created" -> "established"
+	initiatedStream bool // Was the stream initiated within this Peer
+	registered bool // true once RegisterHandler succeeded (see beforeHello)
+	syncBlocks chan *pb.SyncBlocks
+	snapshotRequestHandler *syncStateSnapshotRequestHandler
+	syncStateDeltasRequestHandler *syncStateDeltasHandler
+	syncBlocksRequestHandler *syncBlocksRequestHandler
+}
+
+// NewPeerHandler returns a new Peer handler
+// Is instance of HandlerFactory
+// NOTE(review): the nextHandler parameter is accepted but never used in this
+// constructor.
+func NewPeerHandler(coord MessageHandlerCoordinator, stream ChatStream, initiatedStream bool, nextHandler MessageHandler) (MessageHandler, error) {
+
+	d := &Handler{
+		ChatStream: stream,
+		initiatedStream: initiatedStream,
+		Coordinator: coord,
+	}
+	d.doneChan = make(chan struct{})
+
+	d.snapshotRequestHandler = newSyncStateSnapshotRequestHandler()
+	d.syncStateDeltasRequestHandler = newSyncStateDeltasHandler()
+	d.syncBlocksRequestHandler = newSyncBlocksRequestHandler()
+	// Two-state FSM: DISC_HELLO moves "created" -> "established"; every other
+	// message type is only legal once established.
+	d.FSM = fsm.NewFSM(
+		"created",
+		fsm.Events{
+			{Name: pb.Message_DISC_HELLO.String(), Src: []string{"created"}, Dst: "established"},
+			{Name: pb.Message_DISC_GET_PEERS.String(), Src: []string{"established"}, Dst: "established"},
+			{Name: pb.Message_DISC_PEERS.String(), Src: []string{"established"}, Dst: "established"},
+			{Name: pb.Message_SYNC_BLOCK_ADDED.String(), Src: []string{"established"}, Dst: "established"},
+			{Name: pb.Message_SYNC_GET_BLOCKS.String(), Src: []string{"established"}, Dst: "established"},
+			{Name: pb.Message_SYNC_BLOCKS.String(), Src: []string{"established"}, Dst: "established"},
+			{Name: pb.Message_SYNC_STATE_GET_SNAPSHOT.String(), Src: []string{"established"}, Dst: "established"},
+			{Name: pb.Message_SYNC_STATE_SNAPSHOT.String(), Src: []string{"established"}, Dst: "established"},
+			{Name: pb.Message_SYNC_STATE_GET_DELTAS.String(), Src: []string{"established"}, Dst: "established"},
+			{Name: pb.Message_SYNC_STATE_DELTAS.String(), Src: []string{"established"}, Dst: "established"},
+		},
+		fsm.Callbacks{
+			"enter_state": func(e *fsm.Event) { d.enterState(e) },
+			"before_" + pb.Message_DISC_HELLO.String(): func(e *fsm.Event) { d.beforeHello(e) },
+			"before_" + pb.Message_DISC_GET_PEERS.String(): func(e *fsm.Event) { d.beforeGetPeers(e) },
+			"before_" + pb.Message_DISC_PEERS.String(): func(e *fsm.Event) { d.beforePeers(e) },
+			"before_" + pb.Message_SYNC_BLOCK_ADDED.String(): func(e *fsm.Event) { d.beforeBlockAdded(e) },
+			"before_" + pb.Message_SYNC_GET_BLOCKS.String(): func(e *fsm.Event) { d.beforeSyncGetBlocks(e) },
+			"before_" + pb.Message_SYNC_BLOCKS.String(): func(e *fsm.Event) { d.beforeSyncBlocks(e) },
+			"before_" + pb.Message_SYNC_STATE_GET_SNAPSHOT.String(): func(e *fsm.Event) { d.beforeSyncStateGetSnapshot(e) },
+			"before_" + pb.Message_SYNC_STATE_SNAPSHOT.String(): func(e *fsm.Event) { d.beforeSyncStateSnapshot(e) },
+			"before_" + pb.Message_SYNC_STATE_GET_DELTAS.String(): func(e *fsm.Event) { d.beforeSyncStateGetDeltas(e) },
+			"before_" + pb.Message_SYNC_STATE_DELTAS.String(): func(e *fsm.Event) { d.beforeSyncStateDeltas(e) },
+		},
+	)
+
+	// If the stream was initiated from this Peer, send an Initial HELLO message
+	if d.initiatedStream {
+		// Send intiial Hello
+		helloMessage, err := d.Coordinator.NewOpenchainDiscoveryHello()
+		if err != nil {
+			return nil, fmt.Errorf("Error getting new HelloMessage: %s", err)
+		}
+		if err := d.SendMessage(helloMessage); err != nil {
+			return nil, fmt.Errorf("Error creating new Peer Handler, error returned sending %s: %s", pb.Message_DISC_HELLO, err)
+		}
+	}
+
+	return d, nil
+}
+
+// enterState is the FSM "enter_state" callback; it only logs the transition.
+func (d *Handler) enterState(e *fsm.Event) {
+	peerLogger.Debugf("The Peer's bi-directional stream to %s is %s, from event %s\n", d.ToPeerEndpoint, e.Dst, e.Event)
+}
+
+// deregister removes this handler from the Coordinator (if registered) and
+// signals the discovery loop started in beforeHello to stop.
+func (d *Handler) deregister() error {
+	var err error
+	if d.registered {
+		err = d.Coordinator.DeregisterHandler(d)
+		//doneChan is created and waiting for registered handlers only
+		// NOTE(review): this send blocks until start()'s select receives it.
+		d.doneChan <- struct{}{}
+		d.registered = false
+	}
+	return err
+}
+
+// To return the PeerEndpoint this Handler is connected to.
+func (d *Handler) To() (pb.PeerEndpoint, error) {
+	if d.ToPeerEndpoint == nil {
+		return pb.PeerEndpoint{}, fmt.Errorf("No peer endpoint for handler")
+	}
+	return *(d.ToPeerEndpoint), nil
+}
+
+// Stop stops this handler, which will trigger the Deregister from the MessageHandlerCoordinator.
+func (d *Handler) Stop() error {
+	if err := d.deregister(); err != nil {
+		return fmt.Errorf("Error stopping MessageHandler: %s", err)
+	}
+	return nil
+}
+
+// beforeHello handles an incoming DISC_HELLO: it records the remote
+// endpoint, optionally verifies the message signature, replies with its own
+// HELLO when it did not initiate the stream, registers the handler with the
+// Coordinator, updates the discovery list, and starts the discovery loop.
+func (d *Handler) beforeHello(e *fsm.Event) {
+	peerLogger.Debugf("Received %s, parsing out Peer identification", e.Event)
+	// Parse out the PeerEndpoint information
+	if _, ok := e.Args[0].(*pb.Message); !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	msg := e.Args[0].(*pb.Message)
+
+	helloMessage := &pb.HelloMessage{}
+	err := proto.Unmarshal(msg.Payload, helloMessage)
+	if err != nil {
+		e.Cancel(fmt.Errorf("Error unmarshalling HelloMessage: %s", err))
+		return
+	}
+	// Store the PeerEndpoint
+	d.ToPeerEndpoint = helloMessage.PeerEndpoint
+	peerLogger.Debugf("Received %s from endpoint=%s", e.Event, helloMessage)
+
+	// If security enabled, need to verify the signature on the hello message
+	if SecurityEnabled() {
+		if err := d.Coordinator.GetSecHelper().Verify(helloMessage.PeerEndpoint.PkiID, msg.Signature, msg.Payload); err != nil {
+			e.Cancel(fmt.Errorf("Error Verifying signature for received HelloMessage: %s", err))
+			return
+		}
+		peerLogger.Debugf("Verified signature for %s", e.Event)
+	}
+
+	if d.initiatedStream == false {
+		// Did NOT intitiate the stream, need to send back HELLO
+		peerLogger.Debugf("Received %s, sending back %s", e.Event, pb.Message_DISC_HELLO.String())
+		// Send back out PeerID information in a Hello
+		helloMessage, err := d.Coordinator.NewOpenchainDiscoveryHello()
+		if err != nil {
+			e.Cancel(fmt.Errorf("Error getting new HelloMessage: %s", err))
+			return
+		}
+		if err := d.SendMessage(helloMessage); err != nil {
+			e.Cancel(fmt.Errorf("Error sending response to %s: %s", e.Event, err))
+			return
+		}
+	}
+	// Register
+	err = d.Coordinator.RegisterHandler(d)
+	if err != nil {
+		e.Cancel(fmt.Errorf("Error registering Handler: %s", err))
+	} else {
+		// Registered successfully
+		d.registered = true
+		// Record the newly discovered peer address and persist the list;
+		// failures here are logged but do not cancel the event.
+		otherPeer := d.ToPeerEndpoint.Address
+		if !d.Coordinator.GetDiscHelper().FindNode(otherPeer) {
+			if ok := d.Coordinator.GetDiscHelper().AddNode(otherPeer); !ok {
+				peerLogger.Warningf("Unable to add peer %v to discovery list", otherPeer)
+			}
+			err = d.Coordinator.StoreDiscoveryList()
+			if err != nil {
+				peerLogger.Error(err)
+			}
+		}
+		// Begin periodic discovery; stopped via doneChan in deregister().
+		go d.start()
+	}
+}
+
+// beforeGetPeers answers a DISC_GET_PEERS request by marshalling the
+// coordinator's current peer list into a DISC_PEERS reply on the stream.
+func (d *Handler) beforeGetPeers(e *fsm.Event) {
+	peersMessage, err := d.Coordinator.GetPeers()
+	if err != nil {
+		e.Cancel(fmt.Errorf("Error Getting Peers: %s", err))
+		return
+	}
+	payload, marshalErr := proto.Marshal(peersMessage)
+	if marshalErr != nil {
+		e.Cancel(fmt.Errorf("Error Marshalling PeersMessage: %s", marshalErr))
+		return
+	}
+	peerLogger.Debugf("Sending back %s", pb.Message_DISC_PEERS.String())
+	if sendErr := d.SendMessage(&pb.Message{Type: pb.Message_DISC_PEERS, Payload: payload}); sendErr != nil {
+		e.Cancel(sendErr)
+	}
+}
+
+// beforePeers handles a DISC_PEERS reply: it unmarshals the peer list and
+// hands it to the Coordinator for discovery processing.
+func (d *Handler) beforePeers(e *fsm.Event) {
+	peerLogger.Debugf("Received %s, grabbing peers message", e.Event)
+	// Parse out the PeerEndpoint information
+	if _, ok := e.Args[0].(*pb.Message); !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	msg := e.Args[0].(*pb.Message)
+
+	peersMessage := &pb.PeersMessage{}
+	err := proto.Unmarshal(msg.Payload, peersMessage)
+	if err != nil {
+		e.Cancel(fmt.Errorf("Error unmarshalling PeersMessage: %s", err))
+		return
+	}
+
+	peerLogger.Debugf("Received PeersMessage with Peers: %s", peersMessage)
+	d.Coordinator.PeersDiscovered(peersMessage)
+
+	// // Can be used to demonstrate Broadcast function
+	// if viper.GetString("peer.id") == "jdoe" {
+	// 	d.Coordinator.Broadcast(&pb.Message{Type: pb.Message_UNDEFINED})
+	// }
+
+}
+
+// beforeBlockAdded handles SYNC_BLOCK_ADDED. Currently a stub: it validates
+// the argument type but performs no ledger update yet (`_ = msg` below).
+func (d *Handler) beforeBlockAdded(e *fsm.Event) {
+	peerLogger.Debugf("Received message: %s", e.Event)
+	msg, ok := e.Args[0].(*pb.Message)
+	if !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	// Add the block and any delta state to the ledger
+	_ = msg
+}
+
+// when reports whether the handler's FSM is currently in the given state.
+func (d *Handler) when(stateToCheck string) bool {
+	return d.FSM.Is(stateToCheck)
+}
+
+// HandleMessage handles the Openchain messages for the Peer.
+// The message type is fed to the FSM as an event; only NoTransitionError is
+// tolerated, anything else is surfaced to the caller.
+func (d *Handler) HandleMessage(msg *pb.Message) error {
+	peerLogger.Debugf("Handling Message of type: %s ", msg.Type)
+	if d.FSM.Cannot(msg.Type.String()) {
+		return fmt.Errorf("Peer FSM cannot handle message (%s) with payload size (%d) while in state: %s", msg.Type.String(), len(msg.Payload), d.FSM.Current())
+	}
+	err := d.FSM.Event(msg.Type.String(), msg)
+	if err != nil {
+		if _, ok := err.(*fsm.NoTransitionError); !ok {
+			// Only allow NoTransitionError's, all others are considered true error.
+			return fmt.Errorf("Peer FSM failed while handling message (%s): current state: %s, error: %s", msg.Type.String(), d.FSM.Current(), err)
+		}
+	}
+	return nil
+}
+
+// SendMessage sends a message to the remote PEER through the stream
+func (d *Handler) SendMessage(msg *pb.Message) error {
+	//make sure Sends are serialized. Also make sure everyone uses SendMessage
+	//instead of calling Send directly on the grpc stream
+	d.chatMutex.Lock()
+	defer d.chatMutex.Unlock()
+	peerLogger.Debugf("Sending message to stream of type: %s ", msg.Type)
+	err := d.ChatStream.Send(msg)
+	if err != nil {
+		return fmt.Errorf("Error Sending message through ChatStream: %s", err)
+	}
+	return nil
+}
+
+// start runs the Peer discovery loop: every peer.discovery.period it asks
+// the remote peer for its peer list, until doneChan is signalled by
+// deregister(). Returns an error for an invalid (non-positive) period.
+func (d *Handler) start() error {
+	discPeriod := viper.GetDuration("peer.discovery.period")
+	if discPeriod <= 0 {
+		// time.NewTicker panics on non-positive durations; fail gracefully
+		// instead of crashing the peer on a misconfiguration.
+		return fmt.Errorf("Invalid peer.discovery.period: %v", discPeriod)
+	}
+	ticker := time.NewTicker(discPeriod)
+	// The original ticker was never stopped, leaking its resources after the
+	// loop exits; release them when this goroutine returns.
+	defer ticker.Stop()
+	peerLogger.Debug("Starting Peer discovery service")
+	for {
+		select {
+		case <-ticker.C:
+			if err := d.SendMessage(&pb.Message{Type: pb.Message_DISC_GET_PEERS}); err != nil {
+				peerLogger.Errorf("Error sending %s during handler discovery tick: %s", pb.Message_DISC_GET_PEERS, err)
+			}
+		case <-d.doneChan:
+			peerLogger.Debug("Stopping discovery service")
+			return nil
+		}
+	}
+}
+
+// RequestBlocks get the blocks from the other PeerEndpoint based upon supplied SyncBlockRange, will provide them through the returned channel.
+// this will also stop writing any received blocks to channels created from Prior calls to RequestBlocks(..)
+func (d *Handler) RequestBlocks(syncBlockRange *pb.SyncBlockRange) (<-chan *pb.SyncBlocks, error) {
+	d.syncBlocksRequestHandler.Lock()
+	defer d.syncBlocksRequestHandler.Unlock()
+
+	// reset() invalidates prior requests; the new correlation id stamped on
+	// the range lets beforeSyncBlocks discard stale responses.
+	d.syncBlocksRequestHandler.reset()
+	syncBlockRange.CorrelationId = d.syncBlocksRequestHandler.correlationID
+
+	// Marshal the SyncBlockRange as the payload
+	syncBlockRangeBytes, err := proto.Marshal(syncBlockRange)
+	if err != nil {
+		return nil, fmt.Errorf("Error marshaling syncBlockRange during GetBlocks: %s", err)
+	}
+	peerLogger.Debugf("Sending %s with Range %s", pb.Message_SYNC_GET_BLOCKS.String(), syncBlockRange)
+	if err := d.SendMessage(&pb.Message{Type: pb.Message_SYNC_GET_BLOCKS, Payload: syncBlockRangeBytes}); err != nil {
+		return nil, fmt.Errorf("Error sending %s during GetBlocks: %s", pb.Message_SYNC_GET_BLOCKS, err)
+	}
+	return d.syncBlocksRequestHandler.channel, nil
+}
+
+// beforeSyncGetBlocks handles a SYNC_GET_BLOCKS request from the remote peer
+// by spawning a goroutine that streams the requested block range back.
+func (d *Handler) beforeSyncGetBlocks(e *fsm.Event) {
+	peerLogger.Debugf("Received message: %s", e.Event)
+	msg, ok := e.Args[0].(*pb.Message)
+	if !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	// Start a separate go FUNC to send the blocks per the SyncBlockRange payload
+	syncBlockRange := &pb.SyncBlockRange{}
+	err := proto.Unmarshal(msg.Payload, syncBlockRange)
+	if err != nil {
+		e.Cancel(fmt.Errorf("Error unmarshalling SyncBlockRange in GetBlocks: %s", err))
+		return
+	}
+
+	go d.sendBlocks(syncBlockRange)
+}
+
+// beforeSyncBlocks is the FSM callback for a received SYNC_BLOCKS message:
+// it decodes the SyncBlocks payload and forwards it to the channel created by
+// the matching RequestBlocks call, dropping stale or unroutable messages.
+func (d *Handler) beforeSyncBlocks(e *fsm.Event) {
+	peerLogger.Debugf("Received message: %s", e.Event)
+	msg, ok := e.Args[0].(*pb.Message)
+	if !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	// Forward the received SyncBlocks to the channel
+	syncBlocks := &pb.SyncBlocks{}
+	err := proto.Unmarshal(msg.Payload, syncBlocks)
+	if err != nil {
+		e.Cancel(fmt.Errorf("Error unmarshalling SyncBlocks in beforeSyncBlocks: %s", err))
+		return
+	}
+
+	peerLogger.Debugf("Sending block onto channel for start = %d and end = %d", syncBlocks.Range.Start, syncBlocks.Range.End)
+
+	// Send the message onto the channel, allow for the fact that channel may be closed on send attempt.
+	// The recover absorbs the "send on closed channel" panic that can occur if
+	// reset() closed the channel concurrently.
+	defer func() {
+		if x := recover(); x != nil {
+			peerLogger.Errorf("Error sending syncBlocks to channel: %v", x)
+		}
+	}()
+
+	d.syncBlocksRequestHandler.Lock()
+	defer d.syncBlocksRequestHandler.Unlock()
+	// Use non-blocking send, will WARN if missed message.
+	if d.syncBlocksRequestHandler.shouldHandle(syncBlocks.Range.CorrelationId) {
+		select {
+		case d.syncBlocksRequestHandler.channel <- syncBlocks:
+		default:
+			// Channel full: the block stream is now incomplete, so reset to
+			// invalidate the whole request rather than deliver gaps.
+			peerLogger.Warningf("Did NOT send SyncBlocks message to channel for range: %d - %d", syncBlocks.Range.Start, syncBlocks.Range.End)
+			d.syncBlocksRequestHandler.reset()
+		}
+	} else {
+		//Ignore the message, does not match the current correlationId
+		peerLogger.Warningf("Ignoring SyncBlocks message with correlationId = %d, blocks %d to %d, as current correlationId = %d", syncBlocks.Range.CorrelationId, syncBlocks.Range.Start, syncBlocks.Range.End, d.syncBlocksRequestHandler.correlationID)
+	}
+}
+
+// sendBlocks sends the blocks based upon the supplied SyncBlockRange over the stream.
+// Start > End requests the blocks in descending order; each block is sent as
+// its own SYNC_BLOCKS message, and the first error aborts the remainder.
+func (d *Handler) sendBlocks(syncBlockRange *pb.SyncBlockRange) {
+	peerLogger.Debugf("Sending blocks %d-%d", syncBlockRange.Start, syncBlockRange.End)
+	var blockNums []uint64
+	if syncBlockRange.Start > syncBlockRange.End {
+		// Send in reverse order
+		// note that i is a uint so decrementing i below 0 results in an underflow (i becomes uint.MaxValue). Always stop after i == 0
+		for i := syncBlockRange.Start; i >= syncBlockRange.End && i <= syncBlockRange.Start; i-- {
+			blockNums = append(blockNums, i)
+		}
+	} else {
+		// Ascending order, inclusive of both endpoints.
+		for i := syncBlockRange.Start; i <= syncBlockRange.End; i++ {
+			peerLogger.Debugf("Appending to blockNums: %d", i)
+			blockNums = append(blockNums, i)
+		}
+	}
+	for _, currBlockNum := range blockNums {
+		// Get the Block from the local ledger via the coordinator.
+		block, err := d.Coordinator.GetBlockByNumber(currBlockNum)
+		if err != nil {
+			peerLogger.Errorf("Error sending blockNum %d: %s", currBlockNum, err)
+			break
+		}
+		// Encode a SyncBlocks into the payload
+		syncBlocks := &pb.SyncBlocks{Range: &pb.SyncBlockRange{Start: currBlockNum, End: currBlockNum, CorrelationId: syncBlockRange.CorrelationId}, Blocks: []*pb.Block{block}}
+		syncBlocksBytes, err := proto.Marshal(syncBlocks)
+		if err != nil {
+			peerLogger.Errorf("Error marshalling syncBlocks for BlockNum = %d: %s", currBlockNum, err)
+			break
+		}
+		if err := d.SendMessage(&pb.Message{Type: pb.Message_SYNC_BLOCKS, Payload: syncBlocksBytes}); err != nil {
+			peerLogger.Errorf("Error sending blockNum %d: %s", currBlockNum, err)
+			break
+		}
+	}
+}
+
+// ----------------------------------------------------------------------------
+//
+// State sync Snapshot functionality
+//
+//
+// ----------------------------------------------------------------------------
+
+// RequestStateSnapshot request the state snapshot deltas from the other PeerEndpoint, will provide them through the returned channel.
+// this will also stop writing any received syncStateSnapshot(s) to channels created from Prior calls to RequestStateSnapshot()
+func (d *Handler) RequestStateSnapshot() (<-chan *pb.SyncStateSnapshot, error) {
+	d.snapshotRequestHandler.Lock()
+	defer d.snapshotRequestHandler.Unlock()
+	// Reset the handler
+	// (closes any previously returned channel and bumps the correlationID so
+	// stale responses are discarded).
+	d.snapshotRequestHandler.reset()
+
+	// Create the syncStateSnapshotRequest
+	syncStateSnapshotRequest := d.snapshotRequestHandler.createRequest()
+	syncStateSnapshotRequestBytes, err := proto.Marshal(syncStateSnapshotRequest)
+	if err != nil {
+		return nil, fmt.Errorf("Error marshaling syncStateSnapshotRequest during GetStateSnapshot: %s", err)
+	}
+	peerLogger.Debugf("Sending %s with syncStateSnapshotRequest = %s", pb.Message_SYNC_STATE_GET_SNAPSHOT.String(), syncStateSnapshotRequest)
+	if err := d.SendMessage(&pb.Message{Type: pb.Message_SYNC_STATE_GET_SNAPSHOT, Payload: syncStateSnapshotRequestBytes}); err != nil {
+		return nil, fmt.Errorf("Error sending %s during GetStateSnapshot: %s", pb.Message_SYNC_STATE_GET_SNAPSHOT, err)
+	}
+
+	// Snapshot deltas arrive asynchronously via beforeSyncStateSnapshot.
+	return d.snapshotRequestHandler.channel, nil
+}
+
+// beforeSyncStateGetSnapshot triggers the sending of State Snapshot deltas to remote Peer.
+// FSM callback for SYNC_STATE_GET_SNAPSHOT; a malformed message cancels the event.
+func (d *Handler) beforeSyncStateGetSnapshot(e *fsm.Event) {
+	peerLogger.Debugf("Received message: %s", e.Event)
+	msg, ok := e.Args[0].(*pb.Message)
+	if !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	// Unmarshall the sync State snapshot request
+	syncStateSnapshotRequest := &pb.SyncStateSnapshotRequest{}
+	err := proto.Unmarshal(msg.Payload, syncStateSnapshotRequest)
+	if err != nil {
+		e.Cancel(fmt.Errorf("Error unmarshalling SyncStateSnapshotRequest in beforeSyncStateGetSnapshot: %s", err))
+		return
+	}
+
+	// Start a separate go FUNC to send the State snapshot
+	go d.sendStateSnapshot(syncStateSnapshotRequest)
+}
+
+// beforeSyncStateSnapshot will write the State Snapshot deltas to the respective channel.
+// FSM callback for SYNC_STATE_SNAPSHOT; stale correlationIDs are ignored and a
+// full channel discards the whole snapshot stream via reset().
+func (d *Handler) beforeSyncStateSnapshot(e *fsm.Event) {
+	peerLogger.Debugf("Received message: %s", e.Event)
+	msg, ok := e.Args[0].(*pb.Message)
+	if !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	// Forward the received syncStateSnapshot to the channel
+	syncStateSnapshot := &pb.SyncStateSnapshot{}
+	err := proto.Unmarshal(msg.Payload, syncStateSnapshot)
+	if err != nil {
+		e.Cancel(fmt.Errorf("Error unmarshalling syncStateSnapshot in beforeSyncStateSnapshot: %s", err))
+		return
+	}
+
+	// Send the message onto the channel, allow for the fact that channel may be closed on send attempt.
+	// The recover absorbs a "send on closed channel" panic from a concurrent reset().
+	defer func() {
+		if x := recover(); x != nil {
+			peerLogger.Errorf("Error sending syncStateSnapshot to channel: %v", x)
+		}
+	}()
+	// Use non-blocking send, will WARN and close channel if missed message.
+	d.snapshotRequestHandler.Lock()
+	defer d.snapshotRequestHandler.Unlock()
+	// Make sure the correlationID matches
+	if d.snapshotRequestHandler.shouldHandle(syncStateSnapshot.Request.CorrelationId) {
+		select {
+		case d.snapshotRequestHandler.channel <- syncStateSnapshot:
+		default:
+			// Was not able to write to the channel, in which case the Snapshot stream is incomplete, and must be discarded, closing the channel
+			// without sending the terminating message which would have had an empty byte slice.
+			peerLogger.Warningf("Did NOT send SyncStateSnapshot message to channel for correlationId = %d, sequence = %d, closing channel as the message has been discarded", syncStateSnapshot.Request.CorrelationId, syncStateSnapshot.Sequence)
+			d.snapshotRequestHandler.reset()
+		}
+	} else {
+		//Ignore the message, does not match the current correlationId
+		peerLogger.Warningf("Ignoring SyncStateSnapshot message with correlationId = %d, sequence = %d, as current correlationId = %d", syncStateSnapshot.Request.CorrelationId, syncStateSnapshot.Sequence, d.snapshotRequestHandler.correlationID)
+	}
+}
+
+// sendStateSnapshot streams the full state snapshot to the requesting peer:
+// one SYNC_STATE_SNAPSHOT message per key/value delta, followed by a
+// terminating message with an empty Delta and Sequence = last + 1.
+// (Previous comment was a copy-paste of sendBlocks' and was wrong.)
+func (d *Handler) sendStateSnapshot(syncStateSnapshotRequest *pb.SyncStateSnapshotRequest) {
+	peerLogger.Debugf("Sending state snapshot with correlationId = %d", syncStateSnapshotRequest.CorrelationId)
+
+	snapshot, err := d.Coordinator.GetStateSnapshot()
+	if err != nil {
+		peerLogger.Errorf("Error getting snapshot: %s", err)
+		return
+	}
+	defer snapshot.Release()
+
+	// Iterate over the state deltas and send to requestor
+	currBlockNumber := snapshot.GetBlockNumber()
+	var sequence uint64
+	// Loop through and send the Deltas
+	for i := 0; snapshot.Next(); i++ {
+		delta := statemgmt.NewStateDelta()
+		k, v := snapshot.GetRawKeyValue()
+		cID, kID := statemgmt.DecodeCompositeKey(k)
+		delta.Set(cID, kID, v, nil)
+
+		deltaAsBytes := delta.Marshal()
+		// Encode a SyncStateSnapsot into the payload
+		sequence = uint64(i)
+		syncStateSnapshot := &pb.SyncStateSnapshot{Delta: deltaAsBytes, Sequence: sequence, BlockNumber: currBlockNumber, Request: syncStateSnapshotRequest}
+
+		syncStateSnapshotBytes, err := proto.Marshal(syncStateSnapshot)
+		if err != nil {
+			peerLogger.Errorf("Error marshalling syncStateSnapsot for BlockNum = %d: %s", currBlockNumber, err)
+			break
+		}
+		if err := d.SendMessage(&pb.Message{Type: pb.Message_SYNC_STATE_SNAPSHOT, Payload: syncStateSnapshotBytes}); err != nil {
+			peerLogger.Errorf("Error sending syncStateSnapsot for BlockNum = %d: %s", currBlockNumber, err)
+			break
+		}
+	}
+
+	// Now send the terminating message
+	// NOTE(review): for an empty snapshot, sequence is still 0 here, so the
+	// terminator is sent with Sequence = 1 -- confirm consumers tolerate this.
+	syncStateSnapshot := &pb.SyncStateSnapshot{Delta: []byte{}, Sequence: sequence + 1, BlockNumber: currBlockNumber, Request: syncStateSnapshotRequest}
+	syncStateSnapshotBytes, err := proto.Marshal(syncStateSnapshot)
+	if err != nil {
+		peerLogger.Errorf("Error marshalling terminating syncStateSnapsot message for correlationId = %d, BlockNum = %d: %s", syncStateSnapshotRequest.CorrelationId, currBlockNumber, err)
+		return
+	}
+	if err := d.SendMessage(&pb.Message{Type: pb.Message_SYNC_STATE_SNAPSHOT, Payload: syncStateSnapshotBytes}); err != nil {
+		peerLogger.Errorf("Error sending terminating syncStateSnapsot for correlationId = %d, BlockNum = %d: %s", syncStateSnapshotRequest.CorrelationId, currBlockNumber, err)
+		return
+	}
+
+}
+
+// ----------------------------------------------------------------------------
+//
+// State sync Deltas functionality
+//
+//
+// ----------------------------------------------------------------------------
+
+// RequestStateDeltas get the state snapshot deltas from the other PeerEndpoint, will provide them through the returned channel.
+// this will also stop writing any received syncStateSnapshot(s) to channels created from Prior calls to GetStateSnapshot()
+func (d *Handler) RequestStateDeltas(syncBlockRange *pb.SyncBlockRange) (<-chan *pb.SyncStateDeltas, error) {
+	d.syncStateDeltasRequestHandler.Lock()
+	defer d.syncStateDeltasRequestHandler.Unlock()
+	// Reset the handler
+	// (closes any previously returned channel and bumps the correlationID).
+	d.syncStateDeltasRequestHandler.reset()
+	syncBlockRange.CorrelationId = d.syncStateDeltasRequestHandler.correlationID
+
+	// Create the syncStateSnapshotRequest
+	syncStateDeltasRequest := d.syncStateDeltasRequestHandler.createRequest(syncBlockRange)
+	syncStateDeltasRequestBytes, err := proto.Marshal(syncStateDeltasRequest)
+	if err != nil {
+		return nil, fmt.Errorf("Error marshaling syncStateDeltasRequest during RequestStateDeltas: %s", err)
+	}
+	peerLogger.Debugf("Sending %s with syncStateDeltasRequest = %s", pb.Message_SYNC_STATE_GET_DELTAS.String(), syncStateDeltasRequest)
+	if err := d.SendMessage(&pb.Message{Type: pb.Message_SYNC_STATE_GET_DELTAS, Payload: syncStateDeltasRequestBytes}); err != nil {
+		return nil, fmt.Errorf("Error sending %s during RequestStateDeltas: %s", pb.Message_SYNC_STATE_GET_DELTAS, err)
+	}
+
+	// Deltas arrive asynchronously via beforeSyncStateDeltas.
+	return d.syncStateDeltasRequestHandler.channel, nil
+}
+
+// beforeSyncStateGetDeltas triggers the sending of Get SyncStateDeltas to remote Peer.
+// FSM callback for SYNC_STATE_GET_DELTAS; a malformed message cancels the event.
+func (d *Handler) beforeSyncStateGetDeltas(e *fsm.Event) {
+	peerLogger.Debugf("Received message: %s", e.Event)
+	msg, ok := e.Args[0].(*pb.Message)
+	if !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	// Unmarshall the sync State deltas request
+	syncStateDeltasRequest := &pb.SyncStateDeltasRequest{}
+	err := proto.Unmarshal(msg.Payload, syncStateDeltasRequest)
+	if err != nil {
+		e.Cancel(fmt.Errorf("Error unmarshalling SyncStateDeltasRequest in beforeSyncStateGetDeltas: %s", err))
+		return
+	}
+
+	// Start a separate go FUNC to send the State Deltas
+	go d.sendStateDeltas(syncStateDeltasRequest)
+}
+
+// sendStateDeltas sends the state deltas for the block range in the supplied
+// SyncStateDeltasRequest over the stream, one SYNC_STATE_DELTAS message per
+// block. Start > End requests the deltas in descending block order; the first
+// error or discarded delta aborts the remainder.
+// (Previous comment was a copy-paste of sendBlocks' and was wrong.)
+func (d *Handler) sendStateDeltas(syncStateDeltasRequest *pb.SyncStateDeltasRequest) {
+	peerLogger.Debugf("Sending state deltas for block range %d-%d", syncStateDeltasRequest.Range.Start, syncStateDeltasRequest.Range.End)
+	var blockNums []uint64
+	syncBlockRange := syncStateDeltasRequest.Range
+	if syncBlockRange.Start > syncBlockRange.End {
+		// Send in reverse order.
+		// i is a uint64, so decrementing below 0 underflows to MaxUint64; the
+		// extra i <= Start bound terminates the loop after i reaches 0. This
+		// matches the guard in sendBlocks -- the original loop here lacked it
+		// and spun/overflowed blockNums whenever End == 0.
+		for i := syncBlockRange.Start; i >= syncBlockRange.End && i <= syncBlockRange.Start; i-- {
+			blockNums = append(blockNums, i)
+		}
+	} else {
+		// Ascending order, inclusive of both endpoints.
+		for i := syncBlockRange.Start; i <= syncBlockRange.End; i++ {
+			peerLogger.Debugf("Appending to blockNums: %d", i)
+			blockNums = append(blockNums, i)
+		}
+	}
+	for _, currBlockNum := range blockNums {
+		// Get the state deltas for Block from coordinator
+		stateDelta, err := d.Coordinator.GetStateDelta(currBlockNum)
+		if err != nil {
+			peerLogger.Errorf("Error sending stateDelta for blockNum %d: %s", currBlockNum, err)
+			break
+		}
+		if stateDelta == nil {
+			peerLogger.Warningf("Requested to send a stateDelta for blockNum %d which has been discarded", currBlockNum)
+			break
+		}
+		// Encode a SyncStateDeltas into the payload
+		stateDeltaBytes := stateDelta.Marshal()
+		syncStateDeltas := &pb.SyncStateDeltas{Range: &pb.SyncBlockRange{Start: currBlockNum, End: currBlockNum, CorrelationId: syncBlockRange.CorrelationId}, Deltas: [][]byte{stateDeltaBytes}}
+		syncStateDeltasBytes, err := proto.Marshal(syncStateDeltas)
+		if err != nil {
+			peerLogger.Errorf("Error marshalling syncStateDeltas for BlockNum = %d: %s", currBlockNum, err)
+			break
+		}
+		if err := d.SendMessage(&pb.Message{Type: pb.Message_SYNC_STATE_DELTAS, Payload: syncStateDeltasBytes}); err != nil {
+			peerLogger.Errorf("Error sending stateDeltas for blockNum %d: %s", currBlockNum, err)
+			break
+		}
+	}
+}
+
+// beforeSyncStateDeltas is the FSM callback for a received SYNC_STATE_DELTAS
+// message: it decodes the SyncStateDeltas payload and forwards it to the
+// channel created by the matching RequestStateDeltas call, dropping stale or
+// unroutable messages.
+func (d *Handler) beforeSyncStateDeltas(e *fsm.Event) {
+	peerLogger.Debugf("Received message: %s", e.Event)
+	msg, ok := e.Args[0].(*pb.Message)
+	if !ok {
+		e.Cancel(fmt.Errorf("Received unexpected message type"))
+		return
+	}
+	// Forward the received SyncStateDeltas to the channel
+	syncStateDeltas := &pb.SyncStateDeltas{}
+	err := proto.Unmarshal(msg.Payload, syncStateDeltas)
+	if err != nil {
+		e.Cancel(fmt.Errorf("Error unmarshalling SyncStateDeltas in beforeSyncStateDeltas: %s", err))
+		return
+	}
+	peerLogger.Debugf("Sending state delta onto channel for start = %d and end = %d", syncStateDeltas.Range.Start, syncStateDeltas.Range.End)
+
+	// Send the message onto the channel, allow for the fact that channel may be closed on send attempt.
+	// The recover absorbs a "send on closed channel" panic from a concurrent reset().
+	defer func() {
+		if x := recover(); x != nil {
+			peerLogger.Errorf("Error sending syncStateDeltas to channel: %v", x)
+		}
+	}()
+
+	// Use non-blocking send, will WARN and close channel if missed message.
+	d.syncStateDeltasRequestHandler.Lock()
+	defer d.syncStateDeltasRequestHandler.Unlock()
+	if d.syncStateDeltasRequestHandler.shouldHandle(syncStateDeltas.Range.CorrelationId) {
+		select {
+		case d.syncStateDeltasRequestHandler.channel <- syncStateDeltas:
+		default:
+			// Was not able to write to the channel, in which case the SyncStateDeltasRequest stream is incomplete, and must be discarded, closing the channel
+			peerLogger.Warningf("Did NOT send SyncStateDeltas message to channel for block range %d-%d, closing channel as the message has been discarded", syncStateDeltas.Range.Start, syncStateDeltas.Range.End)
+			d.syncStateDeltasRequestHandler.reset()
+		}
+	} else {
+		//Ignore the message, does not match the current correlationId
+		peerLogger.Warningf("Ignoring SyncStateDeltas message with correlationId = %d, blocks %d to %d, as current correlationId = %d", syncStateDeltas.Range.CorrelationId, syncStateDeltas.Range.Start, syncStateDeltas.Range.End, d.syncStateDeltasRequestHandler.correlationID)
+	}
+
+}
diff --git a/core/peer/handler_sync_state.go b/core/peer/handler_sync_state.go
new file mode 100644
index 00000000000..16484833ba4
--- /dev/null
+++ b/core/peer/handler_sync_state.go
@@ -0,0 +1,121 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package peer
+
+import (
+ "sync"
+
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+//-----------------------------------------------------------------------------
+//
+// Sync Handler
+//
+//-----------------------------------------------------------------------------
+
+// syncHandler is the shared base for the sync request handlers below: a mutex
+// guarding the handler state plus the correlationID of the current request.
+type syncHandler struct {
+	sync.Mutex
+	// correlationID identifies the currently active request; responses carrying
+	// any other ID are ignored by shouldHandle.
+	correlationID uint64
+}
+
+// shouldHandle reports whether a response with the given correlationID belongs
+// to the currently active request. Callers must hold the handler's mutex.
+func (sh *syncHandler) shouldHandle(correlationID uint64) bool {
+	return correlationID == sh.correlationID
+}
+
+//-----------------------------------------------------------------------------
+//
+// Sync Blocks Handler
+//
+//-----------------------------------------------------------------------------
+
+// syncBlocksRequestHandler tracks the in-flight RequestBlocks call and the
+// channel on which its SyncBlocks responses are delivered.
+type syncBlocksRequestHandler struct {
+	syncHandler
+	channel chan *pb.SyncBlocks
+}
+
+// reset closes the previous response channel (if any), allocates a fresh
+// buffered channel, and bumps the correlationID so stale responses are
+// rejected. Callers must hold the handler's mutex.
+func (sbh *syncBlocksRequestHandler) reset() {
+	if sbh.channel != nil {
+		close(sbh.channel)
+	}
+	sbh.channel = make(chan *pb.SyncBlocks, SyncBlocksChannelSize())
+	sbh.correlationID++
+}
+
+// newSyncBlocksRequestHandler constructs a handler with an initialized channel
+// and correlationID of 1 (reset is called once at construction).
+func newSyncBlocksRequestHandler() *syncBlocksRequestHandler {
+	sbh := &syncBlocksRequestHandler{}
+	sbh.reset()
+	return sbh
+}
+
+//-----------------------------------------------------------------------------
+//
+// Sync State Snapshot Handler
+//
+//-----------------------------------------------------------------------------
+
+// syncStateSnapshotRequestHandler tracks the in-flight RequestStateSnapshot
+// call and the channel on which its SyncStateSnapshot responses are delivered.
+type syncStateSnapshotRequestHandler struct {
+	syncHandler
+	channel chan *pb.SyncStateSnapshot
+}
+
+// reset closes the previous response channel (if any), allocates a fresh
+// buffered channel, and bumps the correlationID. Callers must hold the mutex.
+func (srh *syncStateSnapshotRequestHandler) reset() {
+	if srh.channel != nil {
+		close(srh.channel)
+	}
+	srh.channel = make(chan *pb.SyncStateSnapshot, SyncStateSnapshotChannelSize())
+	srh.correlationID++
+}
+
+// createRequest builds the snapshot request tagged with the current
+// correlationID. Callers must hold the mutex.
+func (srh *syncStateSnapshotRequestHandler) createRequest() *pb.SyncStateSnapshotRequest {
+	return &pb.SyncStateSnapshotRequest{CorrelationId: srh.correlationID}
+}
+
+// newSyncStateSnapshotRequestHandler constructs a handler with an initialized
+// channel and correlationID of 1.
+func newSyncStateSnapshotRequestHandler() *syncStateSnapshotRequestHandler {
+	srh := &syncStateSnapshotRequestHandler{}
+	srh.reset()
+	return srh
+}
+
+//-----------------------------------------------------------------------------
+//
+// Sync State Deltas Handler
+//
+//-----------------------------------------------------------------------------
+
+// syncStateDeltasHandler tracks the in-flight RequestStateDeltas call and the
+// channel on which its SyncStateDeltas responses are delivered.
+type syncStateDeltasHandler struct {
+	syncHandler
+	channel chan *pb.SyncStateDeltas
+}
+
+// reset closes the previous response channel (if any), allocates a fresh
+// buffered channel, and bumps the correlationID. Callers must hold the mutex.
+func (ssdh *syncStateDeltasHandler) reset() {
+	if ssdh.channel != nil {
+		close(ssdh.channel)
+	}
+	ssdh.channel = make(chan *pb.SyncStateDeltas, SyncStateDeltasChannelSize())
+	ssdh.correlationID++
+}
+
+// createRequest wraps the supplied range in a SyncStateDeltasRequest. The
+// correlationID travels inside syncBlockRange (set by the caller), not here.
+func (ssdh *syncStateDeltasHandler) createRequest(syncBlockRange *pb.SyncBlockRange) *pb.SyncStateDeltasRequest {
+	return &pb.SyncStateDeltasRequest{Range: syncBlockRange}
+}
+
+// newSyncStateDeltasHandler constructs a handler with an initialized channel
+// and correlationID of 1.
+func newSyncStateDeltasHandler() *syncStateDeltasHandler {
+	ssdh := &syncStateDeltasHandler{}
+	ssdh.reset()
+	return ssdh
+}
diff --git a/core/peer/peer.go b/core/peer/peer.go
new file mode 100644
index 00000000000..cea2319afca
--- /dev/null
+++ b/core/peer/peer.go
@@ -0,0 +1,889 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package peer
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/grpc"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+
+ "github.com/hyperledger/fabric/core/comm"
+ "github.com/hyperledger/fabric/core/crypto"
+ "github.com/hyperledger/fabric/core/db"
+ "github.com/hyperledger/fabric/core/discovery"
+ "github.com/hyperledger/fabric/core/ledger"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/ledger/statemgmt/state"
+ "github.com/hyperledger/fabric/core/util"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// Peer provides interface for a peer
+// (endpoint identity plus the hello message used to open discovery chats).
+type Peer interface {
+	GetPeerEndpoint() (*pb.PeerEndpoint, error)
+	NewOpenchainDiscoveryHello() (*pb.Message, error)
+}
+
+// BlocksRetriever interface for retrieving blocks .
+type BlocksRetriever interface {
+	RequestBlocks(*pb.SyncBlockRange) (<-chan *pb.SyncBlocks, error)
+}
+
+// StateRetriever interface for retrieving state deltas, etc.
+type StateRetriever interface {
+	RequestStateSnapshot() (<-chan *pb.SyncStateSnapshot, error)
+	RequestStateDeltas(syncBlockRange *pb.SyncBlockRange) (<-chan *pb.SyncStateDeltas, error)
+}
+
+// RemoteLedger interface for retrieving remote ledger data.
+// Union of block and state retrieval against a remote peer.
+type RemoteLedger interface {
+	BlocksRetriever
+	StateRetriever
+}
+
+// BlockChainAccessor interface for retreiving blocks by block number
+// and interrogating the local chain's size and state hash.
+type BlockChainAccessor interface {
+	GetBlockByNumber(blockNumber uint64) (*pb.Block, error)
+	GetBlockchainSize() uint64
+	GetCurrentStateHash() (stateHash []byte, err error)
+}
+
+// BlockChainModifier interface for applying changes to the block chain
+// (state delta application with rollback/commit, plus block insertion).
+type BlockChainModifier interface {
+	ApplyStateDelta(id interface{}, delta *statemgmt.StateDelta) error
+	RollbackStateDelta(id interface{}) error
+	CommitStateDelta(id interface{}) error
+	EmptyState() error
+	PutBlock(blockNumber uint64, block *pb.Block) error
+}
+
+// BlockChainUtil interface for interrogating the block chain
+type BlockChainUtil interface {
+	HashBlock(block *pb.Block) ([]byte, error)
+	VerifyBlockchain(start, finish uint64) (uint64, error)
+}
+
+// StateAccessor interface for retreiving blocks by block number
+// NOTE(review): comment looks copy-pasted; these methods retrieve state
+// snapshots/deltas, not blocks.
+type StateAccessor interface {
+	GetStateSnapshot() (*state.StateSnapshot, error)
+	GetStateDelta(blockNumber uint64) (*statemgmt.StateDelta, error)
+}
+
+// MessageHandler standard interface for handling Openchain messages.
+// A handler owns one peer-to-peer stream and also acts as the RemoteLedger
+// for the peer on the other end.
+type MessageHandler interface {
+	RemoteLedger
+	HandleMessage(msg *pb.Message) error
+	SendMessage(msg *pb.Message) error
+	To() (pb.PeerEndpoint, error)
+	Stop() error
+}
+
+// MessageHandlerCoordinator responsible for coordinating between the registered MessageHandler's
+// (registration, broadcast/unicast routing, discovery, and ledger access).
+type MessageHandlerCoordinator interface {
+	Peer
+	SecurityAccessor
+	BlockChainAccessor
+	BlockChainModifier
+	BlockChainUtil
+	StateAccessor
+	RegisterHandler(messageHandler MessageHandler) error
+	DeregisterHandler(messageHandler MessageHandler) error
+	Broadcast(*pb.Message, pb.PeerEndpoint_Type) []error
+	Unicast(*pb.Message, *pb.PeerID) error
+	GetPeers() (*pb.PeersMessage, error)
+	GetRemoteLedger(receiver *pb.PeerID) (RemoteLedger, error)
+	PeersDiscovered(*pb.PeersMessage) error
+	ExecuteTransaction(transaction *pb.Transaction) *pb.Response
+	Discoverer
+}
+
+// ChatStream interface supported by stream between Peers
+// (satisfied by both the gRPC client and server Chat streams).
+type ChatStream interface {
+	Send(*pb.Message) error
+	Recv() (*pb.Message, error)
+}
+
+// SecurityAccessor interface enables a Peer to hand out the crypto object for Peer
+type SecurityAccessor interface {
+	GetSecHelper() crypto.Peer
+}
+
+var peerLogger = logging.MustGetLogger("peer")
+
+// NewPeerClientConnection Returns a new grpc.ClientConn to the configured local PEER.
+// The target address is read from the "peer.address" viper configuration key.
+func NewPeerClientConnection() (*grpc.ClientConn, error) {
+	return NewPeerClientConnectionWithAddress(viper.GetString("peer.address"))
+}
+
+// GetLocalIP returns the non loopback local IP of the host
+// as a dotted-quad string, or "" when no suitable IPv4 address is found
+// or the interface addresses cannot be enumerated.
+func GetLocalIP() string {
+	interfaceAddrs, err := net.InterfaceAddrs()
+	if err != nil {
+		return ""
+	}
+	// Report the first interface address that is an IPv4, non-loopback IPNet.
+	for _, addr := range interfaceAddrs {
+		ipNet, isIPNet := addr.(*net.IPNet)
+		if !isIPNet || ipNet.IP.IsLoopback() {
+			continue
+		}
+		if ipNet.IP.To4() != nil {
+			return ipNet.IP.String()
+		}
+	}
+	return ""
+}
+
+// NewPeerClientConnectionWithAddress Returns a new grpc.ClientConn to the configured local PEER.
+// TLS is used when enabled in configuration; otherwise a plaintext connection
+// is established. Both paths request a blocking dial (first argument true).
+func NewPeerClientConnectionWithAddress(peerAddress string) (*grpc.ClientConn, error) {
+	if comm.TLSEnabled() {
+		return comm.NewClientConnectionWithAddress(peerAddress, true, true, comm.InitTLSForPeer())
+	}
+	return comm.NewClientConnectionWithAddress(peerAddress, true, false, nil)
+}
+
+// ledgerWrapper guards the shared ledger with a RWMutex.
+type ledgerWrapper struct {
+	sync.RWMutex
+	ledger *ledger.Ledger
+}
+
+// handlerMap guards the PeerID -> MessageHandler registry with a RWMutex.
+type handlerMap struct {
+	sync.RWMutex
+	m map[pb.PeerID]MessageHandler
+}
+
+// HandlerFactory for creating new MessageHandlers
+type HandlerFactory func(MessageHandlerCoordinator, ChatStream, bool, MessageHandler) (MessageHandler, error)
+
+// EngineFactory for creating new engines
+type EngineFactory func(MessageHandlerCoordinator) (Engine, error)
+
+// PeerImpl implementation of the Peer service
+type PeerImpl struct {
+	handlerFactory HandlerFactory
+	handlerMap *handlerMap
+	ledgerWrapper *ledgerWrapper
+	secHelper crypto.Peer
+	engine Engine
+	isValidator bool
+	reconnectOnce sync.Once
+	discHelper discovery.Discovery
+	discPersist bool
+}
+
+// TransactionProccesor responsible for processing of Transactions
+// NOTE(review): "Proccesor" is misspelled, but the name is exported and
+// cannot be changed without breaking callers.
+type TransactionProccesor interface {
+	ProcessTransactionMsg(*pb.Message, *pb.Transaction) *pb.Response
+}
+
+// Engine Responsible for managing Peer network communications (Handlers) and processing of Transactions
+type Engine interface {
+	TransactionProccesor
+	// GetHandlerFactory return a handler for an accepted Chat stream
+	GetHandlerFactory() HandlerFactory
+	//GetInputChannel() (chan<- *pb.Transaction, error)
+}
+
+// NewPeerWithHandler returns a Peer which uses the supplied handler factory function for creating new handlers on new Chat service invocations.
+// It also initializes discovery, the security helper (required when security
+// is enabled), and the ledger, then begins chatting with the discovered peers.
+func NewPeerWithHandler(secHelperFunc func() crypto.Peer, handlerFact HandlerFactory) (*PeerImpl, error) {
+	peer := new(PeerImpl)
+	peerNodes := peer.initDiscovery()
+
+	if handlerFact == nil {
+		return nil, errors.New("Cannot supply nil handler factory")
+	}
+	peer.handlerFactory = handlerFact
+	peer.handlerMap = &handlerMap{m: make(map[pb.PeerID]MessageHandler)}
+
+	peer.secHelper = secHelperFunc()
+
+	// Install security object for peer
+	if SecurityEnabled() {
+		if peer.secHelper == nil {
+			return nil, fmt.Errorf("Security helper not provided")
+		}
+	}
+
+	ledgerPtr, err := ledger.GetLedger()
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing NewPeerWithHandler: %s", err)
+	}
+	peer.ledgerWrapper = &ledgerWrapper{ledger: ledgerPtr}
+
+	peer.chatWithSomePeers(peerNodes)
+	return peer, nil
+}
+
+// NewPeerWithEngine returns a Peer which uses the supplied handler factory function for creating new handlers on new Chat service invocations.
+// Unlike NewPeerWithHandler, the handler factory is obtained from the engine
+// built by engFactory, and the validator flag is read from configuration.
+func NewPeerWithEngine(secHelperFunc func() crypto.Peer, engFactory EngineFactory) (peer *PeerImpl, err error) {
+	peer = new(PeerImpl)
+	peerNodes := peer.initDiscovery()
+
+	peer.handlerMap = &handlerMap{m: make(map[pb.PeerID]MessageHandler)}
+
+	peer.isValidator = ValidatorEnabled()
+	peer.secHelper = secHelperFunc()
+
+	// Install security object for peer
+	if SecurityEnabled() {
+		if peer.secHelper == nil {
+			return nil, fmt.Errorf("Security helper not provided")
+		}
+	}
+
+	// Initialize the ledger before the engine, as consensus may want to begin interrogating the ledger immediately
+	ledgerPtr, err := ledger.GetLedger()
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing NewPeerWithHandler: %s", err)
+	}
+	peer.ledgerWrapper = &ledgerWrapper{ledger: ledgerPtr}
+
+	peer.engine, err = engFactory(peer)
+	if err != nil {
+		return nil, err
+	}
+	peer.handlerFactory = peer.engine.GetHandlerFactory()
+	if peer.handlerFactory == nil {
+		return nil, errors.New("Cannot supply nil handler factory")
+	}
+
+	peer.chatWithSomePeers(peerNodes)
+	return peer, nil
+
+}
+
+// Chat implementation of the the Chat bidi streaming RPC function
+// (server side: the initiatedStream flag is false).
+func (p *PeerImpl) Chat(stream pb.Peer_ChatServer) error {
+	return p.handleChat(stream.Context(), stream, false)
+}
+
+// ProcessTransaction implementation of the ProcessTransaction RPC function
+// Validators pre-validate the transaction signature (when a security helper
+// is installed) before execution; verification failures are returned as a
+// FAILURE Response with a nil error, not as an RPC error.
+func (p *PeerImpl) ProcessTransaction(ctx context.Context, tx *pb.Transaction) (response *pb.Response, err error) {
+	peerLogger.Debugf("ProcessTransaction processing transaction uuid = %s", tx.Uuid)
+	// Need to validate the Tx's signature if we are a validator.
+	if p.isValidator {
+		// Verify transaction signature if security is enabled
+		secHelper := p.secHelper
+		if nil != secHelper {
+			peerLogger.Debugf("Verifying transaction signature %s", tx.Uuid)
+			if tx, err = secHelper.TransactionPreValidation(tx); err != nil {
+				peerLogger.Errorf("ProcessTransaction failed to verify transaction %v", err)
+				return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(err.Error())}, nil
+			}
+		}
+
+	}
+	return p.ExecuteTransaction(tx), err
+}
+
+// GetPeers returns the currently registered PeerEndpoints
+// by querying each registered MessageHandler; the first handler error aborts
+// and is returned.
+func (p *PeerImpl) GetPeers() (*pb.PeersMessage, error) {
+	p.handlerMap.RLock()
+	defer p.handlerMap.RUnlock()
+	peers := []*pb.PeerEndpoint{}
+	for _, msgHandler := range p.handlerMap.m {
+		peerEndpoint, err := msgHandler.To()
+		if err != nil {
+			return nil, fmt.Errorf("Error getting peers: %s", err)
+		}
+		peers = append(peers, &peerEndpoint)
+	}
+	peersMessage := &pb.PeersMessage{Peers: peers}
+	return peersMessage, nil
+}
+
+// getPeerAddresses extracts the Address field from every endpoint in the
+// supplied PeersMessage, preserving order.
+func getPeerAddresses(peersMsg *pb.PeersMessage) []string {
+	endpoints := peersMsg.GetPeers()
+	addresses := make([]string, len(endpoints))
+	for idx := range endpoints {
+		addresses[idx] = endpoints[idx].Address
+	}
+	return addresses
+}
+
+// GetRemoteLedger returns the RemoteLedger interface for the remote Peer Endpoint
+// identified by receiverHandle; errors if no handler is registered for it.
+func (p *PeerImpl) GetRemoteLedger(receiverHandle *pb.PeerID) (RemoteLedger, error) {
+	p.handlerMap.RLock()
+	defer p.handlerMap.RUnlock()
+	remoteLedger, ok := p.handlerMap.m[*receiverHandle]
+	if !ok {
+		return nil, fmt.Errorf("Remote ledger not found for receiver %s", receiverHandle.Name)
+	}
+	return remoteLedger, nil
+}
+
+// PeersDiscovered used by MessageHandlers for notifying this coordinator of discovered PeerEndoints. May include this Peer's PeerEndpoint.
+// For each endpoint that is neither this peer nor already registered, a new
+// chat is initiated.
+func (p *PeerImpl) PeersDiscovered(peersMessage *pb.PeersMessage) error {
+	thisPeersEndpoint, err := GetPeerEndpoint()
+	if err != nil {
+		return fmt.Errorf("Error in processing PeersDiscovered: %s", err)
+	}
+	p.handlerMap.RLock()
+	defer p.handlerMap.RUnlock()
+	for _, peerEndpoint := range peersMessage.Peers {
+		// Filter out THIS Peer's endpoint
+		if *getHandlerKeyFromPeerEndpoint(thisPeersEndpoint) == *getHandlerKeyFromPeerEndpoint(peerEndpoint) {
+			// NOOP
+		} else if _, ok := p.handlerMap.m[*getHandlerKeyFromPeerEndpoint(peerEndpoint)]; !ok {
+			// Not yet registered: start chat with Peer
+			// (was `ok == false`; `!ok` is the idiomatic form flagged by golint).
+			p.chatWithSomePeers([]string{peerEndpoint.Address})
+		}
+	}
+	return nil
+}
+
+// getHandlerKey derives the handler-map key (the remote peer's ID) from a
+// MessageHandler; returns an empty PeerID and an error if To() fails.
+func getHandlerKey(peerMessageHandler MessageHandler) (*pb.PeerID, error) {
+	endpoint, toErr := peerMessageHandler.To()
+	if toErr != nil {
+		return &pb.PeerID{}, fmt.Errorf("Error getting messageHandler key: %s", toErr)
+	}
+	return endpoint.ID, nil
+}
+
+// getHandlerKeyFromPeerEndpoint returns the endpoint's PeerID, which is the
+// key type used for the coordinator's handler map.
+func getHandlerKeyFromPeerEndpoint(peerEndpoint *pb.PeerEndpoint) *pb.PeerID {
+	return peerEndpoint.ID
+}
+
+// RegisterHandler register a MessageHandler with this coordinator
+// Returns a duplicate-handler error if a handler is already registered for
+// the same PeerID key.
+func (p *PeerImpl) RegisterHandler(messageHandler MessageHandler) error {
+	key, err := getHandlerKey(messageHandler)
+	if err != nil {
+		return fmt.Errorf("Error registering handler: %s", err)
+	}
+	p.handlerMap.Lock()
+	defer p.handlerMap.Unlock()
+	// Idiomatic presence test (was `ok == true`).
+	if _, ok := p.handlerMap.m[*key]; ok {
+		// Duplicate, return error
+		return newDuplicateHandlerError(messageHandler)
+	}
+	p.handlerMap.m[*key] = messageHandler
+	peerLogger.Debugf("registered handler with key: %s", key)
+	return nil
+}
+
+// DeregisterHandler deregisters an already registered MessageHandler for this coordinator
+// Fails with an error when no handler is registered under the handler's key.
+func (p *PeerImpl) DeregisterHandler(messageHandler MessageHandler) error {
+	key, keyErr := getHandlerKey(messageHandler)
+	if keyErr != nil {
+		return fmt.Errorf("Error deregistering handler: %s", keyErr)
+	}
+	p.handlerMap.Lock()
+	defer p.handlerMap.Unlock()
+	_, present := p.handlerMap.m[*key]
+	if !present {
+		// Handler NOT found
+		return fmt.Errorf("Error deregistering handler, could not find handler with key: %s", key)
+	}
+	delete(p.handlerMap.m, *key)
+	peerLogger.Debugf("Deregistered handler with key: %s", key)
+	return nil
+}
+
+// Clone the handler map to avoid locking across SendMessage
+// Only handlers whose endpoint Type matches typ are copied; passing
+// pb.PeerEndpoint_UNDEFINED selects every registered handler.
+func (p *PeerImpl) cloneHandlerMap(typ pb.PeerEndpoint_Type) map[pb.PeerID]MessageHandler {
+	p.handlerMap.RLock()
+	defer p.handlerMap.RUnlock()
+	clone := make(map[pb.PeerID]MessageHandler)
+	for id, msgHandler := range p.handlerMap.m {
+		//pb.PeerEndpoint_UNDEFINED collects all peers
+		if typ != pb.PeerEndpoint_UNDEFINED {
+			// NOTE(review): an error from To() is silently discarded here;
+			// presumably the zero-value endpoint then fails the type filter
+			// below — confirm this is the intended behavior.
+			toPeerEndpoint, _ := msgHandler.To()
+			//ignore endpoints that don't match type filter
+			if typ != toPeerEndpoint.Type {
+				continue
+			}
+		}
+		clone[id] = msgHandler
+	}
+	return clone
+}
+
+// Broadcast broadcast a message to each of the currently registered PeerEndpoints of given type
+// Broadcast will broadcast to all registered PeerEndpoints if the type is PeerEndpoint_UNDEFINED
+// Sends run concurrently, one goroutine per handler; the returned slice
+// collects every per-handler send error (nil slice when all sends succeed).
+func (p *PeerImpl) Broadcast(msg *pb.Message, typ pb.PeerEndpoint_Type) []error {
+	// Snapshot of the handler map so no lock is held while sending.
+	cloneMap := p.cloneHandlerMap(typ)
+	// Buffered to the handler count so sender goroutines never block on it.
+	errorsFromHandlers := make(chan error, len(cloneMap))
+	var bcWG sync.WaitGroup
+
+	start := time.Now()
+
+	for _, msgHandler := range cloneMap {
+		bcWG.Add(1)
+		go func(msgHandler MessageHandler) {
+			defer bcWG.Done()
+			host, _ := msgHandler.To()
+			t1 := time.Now()
+			err := msgHandler.SendMessage(msg)
+			if err != nil {
+				toPeerEndpoint, _ := msgHandler.To()
+				errorsFromHandlers <- fmt.Errorf("Error broadcasting msg (%s) to PeerEndpoint (%s): %s", msg.Type, toPeerEndpoint, err)
+			}
+			peerLogger.Debugf("Sending %d bytes to %s took %v", len(msg.Payload), host.Address, time.Since(t1))
+
+		}(msgHandler)
+
+	}
+	// Wait for all sends, then close the channel so the drain loop terminates.
+	bcWG.Wait()
+	close(errorsFromHandlers)
+	var returnedErrors []error
+	for err := range errorsFromHandlers {
+		returnedErrors = append(returnedErrors, err)
+	}
+
+	elapsed := time.Since(start)
+	peerLogger.Debugf("Broadcast took %v", elapsed)
+
+	return returnedErrors
+}
+
+// getMessageHandler looks up the registered MessageHandler for the given
+// receiver PeerID, erroring when none is registered.
+func (p *PeerImpl) getMessageHandler(receiverHandle *pb.PeerID) (MessageHandler, error) {
+	p.handlerMap.RLock()
+	defer p.handlerMap.RUnlock()
+	if handler, found := p.handlerMap.m[*receiverHandle]; found {
+		return handler, nil
+	}
+	return nil, fmt.Errorf("Message handler not found for receiver %s", receiverHandle.Name)
+}
+
+// Unicast sends a message to a specific peer.
+// The receiver is resolved through the handler map; a missing handler or a
+// failed send is surfaced as an error.
+func (p *PeerImpl) Unicast(msg *pb.Message, receiverHandle *pb.PeerID) error {
+	handler, lookupErr := p.getMessageHandler(receiverHandle)
+	if lookupErr != nil {
+		return lookupErr
+	}
+	if sendErr := handler.SendMessage(msg); sendErr != nil {
+		toPeerEndpoint, _ := handler.To()
+		return fmt.Errorf("Error unicasting msg (%s) to PeerEndpoint (%s): %s", msg.Type, toPeerEndpoint, sendErr)
+	}
+	return nil
+}
+
+// SendTransactionsToPeer forwards transactions to the specified peer address.
+// A fresh gRPC connection is dialed per call and closed on return; failures
+// are reported as FAILURE responses rather than errors.
+func (p *PeerImpl) SendTransactionsToPeer(peerAddress string, transaction *pb.Transaction) (response *pb.Response) {
+	conn, err := NewPeerClientConnectionWithAddress(peerAddress)
+	if err != nil {
+		return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(fmt.Sprintf("Error creating client to peer address=%s: %s", peerAddress, err))}
+	}
+	defer conn.Close()
+	serverClient := pb.NewPeerClient(conn)
+	peerLogger.Debugf("Sending TX to Peer: %s", peerAddress)
+	response, err = serverClient.ProcessTransaction(context.Background(), transaction)
+	if err != nil {
+		return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(fmt.Sprintf("Error calling ProcessTransaction on remote peer at address=%s: %s", peerAddress, err))}
+	}
+	return response
+}
+
+// sendTransactionsToLocalEngine send the transaction to the local engine (This Peer is a validator)
+func (p *PeerImpl) sendTransactionsToLocalEngine(transaction *pb.Transaction) *pb.Response {
+
+ peerLogger.Debugf("Marshalling transaction %s to send to local engine", transaction.Type)
+ data, err := proto.Marshal(transaction)
+ if err != nil {
+ return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte(fmt.Sprintf("Error sending transaction to local engine: %s", err))}
+ }
+
+ var response *pb.Response
+ msg := &pb.Message{Type: pb.Message_CHAIN_TRANSACTION, Payload: data, Timestamp: util.CreateUtcTimestamp()}
+ peerLogger.Debugf("Sending message %s with timestamp %v to local engine", msg.Type, msg.Timestamp)
+ response = p.engine.ProcessTransactionMsg(msg, transaction)
+
+ return response
+}
+
+// ensureConnected runs forever as the "touch service": every touchPeriod it
+// compares the connected peers with discovery's node list and re-initiates
+// chat with up to touchMaxNodes missing peers.
+func (p *PeerImpl) ensureConnected() {
+	touchPeriod := viper.GetDuration("peer.discovery.touchPeriod")
+	touchMaxNodes := viper.GetInt("peer.discovery.touchMaxNodes")
+	tickChan := time.NewTicker(touchPeriod).C
+	peerLogger.Debugf("Starting Peer reconnect service (touch service), with period = %s", touchPeriod)
+	for {
+		// Simply loop and check if need to reconnect
+		<-tickChan
+		peersMsg, err := p.GetPeers()
+		if err != nil {
+			peerLogger.Errorf("Error in touch service: %s", err.Error())
+			// BUGFIX: peersMsg is nil on error; skip this tick instead of
+			// dereferencing it below and panicking.
+			continue
+		}
+		allNodes := p.discHelper.GetAllNodes() // these will always be returned in random order
+		if len(peersMsg.Peers) < len(allNodes) {
+			peerLogger.Warning("Touch service indicates dropped connections, attempting to reconnect...")
+			delta := util.FindMissingElements(allNodes, getPeerAddresses(peersMsg))
+			if len(delta) > touchMaxNodes {
+				delta = delta[:touchMaxNodes]
+			}
+			p.chatWithSomePeers(delta)
+		} else {
+			peerLogger.Debug("Touch service indicates no dropped connections")
+		}
+		peerLogger.Debugf("Connected to: %v", getPeerAddresses(peersMsg))
+		peerLogger.Debugf("Discovery knows about: %v", allNodes)
+	}
+
+}
+
+// chatWithSomePeers initiates chat with 1 or all peers according to whether the node is a validator or not
+// It also lazily starts the reconnect (touch) service exactly once, skips our
+// own address, and spawns one chat goroutine per remaining address.
+func (p *PeerImpl) chatWithSomePeers(addresses []string) {
+	// start the function to ensure we are connected
+	p.reconnectOnce.Do(func() {
+		go p.ensureConnected()
+	})
+	if len(addresses) == 0 {
+		peerLogger.Debug("Starting up the first peer of a new network")
+		return // nothing to do
+	}
+	// The local endpoint is loop-invariant; resolve it once instead of on
+	// every iteration as before.
+	pe, err := GetPeerEndpoint()
+	if err != nil {
+		peerLogger.Errorf("Failed to obtain peer endpoint, %v", err)
+		return
+	}
+	for _, address := range addresses {
+		if address == pe.Address {
+			peerLogger.Debugf("Skipping own address: %v", address)
+			continue
+		}
+		go p.chatWithPeer(address)
+	}
+}
+
+// chatWithPeer dials the given peer address, opens the bidirectional Chat
+// stream and drives it synchronously via handleChat until the stream ends.
+func (p *PeerImpl) chatWithPeer(address string) error {
+	peerLogger.Debugf("Initiating Chat with peer address: %s", address)
+	conn, err := NewPeerClientConnectionWithAddress(address)
+	if err != nil {
+		peerLogger.Errorf("Error creating connection to peer address %s: %s", address, err)
+		return err
+	}
+	// BUGFIX: the connection was previously leaked; handleChat below is
+	// synchronous, so closing on return is safe.
+	defer conn.Close()
+	serverClient := pb.NewPeerClient(conn)
+	ctx := context.Background()
+	stream, err := serverClient.Chat(ctx)
+	if err != nil {
+		peerLogger.Errorf("Error establishing chat with peer address %s: %s", address, err)
+		return err
+	}
+	peerLogger.Debugf("Established Chat with peer address: %s", address)
+	err = p.handleChat(ctx, stream, true)
+	stream.CloseSend()
+	if err != nil {
+		peerLogger.Errorf("Ending Chat with peer address %s due to error: %s", address, err)
+		return err
+	}
+	return nil
+}
+
+// Chat implementation of the the Chat bidi streaming RPC function
+// A handler is created for the stream's lifetime and stopped on return.
+// Returns nil on clean EOF, or the receive error otherwise.
+func (p *PeerImpl) handleChat(ctx context.Context, stream ChatStream, initiatedStream bool) error {
+	deadline, ok := ctx.Deadline()
+	peerLogger.Debugf("Current context deadline = %s, ok = %v", deadline, ok)
+	handler, err := p.handlerFactory(p, stream, initiatedStream, nil)
+	if err != nil {
+		return fmt.Errorf("Error creating handler during handleChat initiation: %s", err)
+	}
+	defer handler.Stop()
+	for {
+		in, err := stream.Recv()
+		if err == io.EOF {
+			peerLogger.Debug("Received EOF, ending Chat")
+			return nil
+		}
+		if err != nil {
+			e := fmt.Errorf("Error during Chat, stopping handler: %s", err)
+			peerLogger.Error(e.Error())
+			return e
+		}
+		err = handler.HandleMessage(in)
+		if err != nil {
+			// Message-handling errors are deliberately logged but not fatal:
+			// the chat loop keeps running (note the disabled return below).
+			peerLogger.Errorf("Error handling message: %s", err)
+			//return err
+		}
+	}
+}
+
+//ExecuteTransaction executes transactions decides to do execute in dev or prod mode
+// Validators process the transaction locally; non-validators forward it to a
+// single randomly chosen peer from discovery.
+func (p *PeerImpl) ExecuteTransaction(transaction *pb.Transaction) (response *pb.Response) {
+	if p.isValidator {
+		return p.sendTransactionsToLocalEngine(transaction)
+	}
+	peerAddresses := p.discHelper.GetRandomNodes(1)
+	if len(peerAddresses) == 0 {
+		// BUGFIX: indexing an empty slice below panicked when discovery knew
+		// no peers; fail gracefully instead.
+		return &pb.Response{Status: pb.Response_FAILURE, Msg: []byte("No peers available to forward transaction to")}
+	}
+	return p.SendTransactionsToPeer(peerAddresses[0], transaction)
+}
+
+// GetPeerEndpoint returns the endpoint for this peer
+// When security is enabled the endpoint is additionally stamped with this
+// peer's PkiID from the crypto helper.
+func (p *PeerImpl) GetPeerEndpoint() (*pb.PeerEndpoint, error) {
+	ep, err := GetPeerEndpoint()
+	if err == nil && SecurityEnabled() {
+		// Set the PkiID on the PeerEndpoint if security is enabled
+		ep.PkiID = p.GetSecHelper().GetID()
+	}
+	return ep, err
+}
+
+// newHelloMessage builds a HelloMessage carrying this peer's endpoint and the
+// current blockchain info, read under the ledger reader lock.
+func (p *PeerImpl) newHelloMessage() (*pb.HelloMessage, error) {
+	endpoint, err := p.GetPeerEndpoint()
+	if err != nil {
+		return nil, fmt.Errorf("Error creating hello message: %s", err)
+	}
+	p.ledgerWrapper.RLock()
+	defer p.ledgerWrapper.RUnlock()
+	blockChainInfo, err := p.ledgerWrapper.ledger.GetBlockchainInfo()
+	if err != nil {
+		return nil, fmt.Errorf("Error creating hello message, error getting block chain info: %s", err)
+	}
+	return &pb.HelloMessage{PeerEndpoint: endpoint, BlockchainInfo: blockChainInfo}, nil
+}
+
+// GetBlockByNumber return a block by block number
+// Read-only ledger access; taken under the ledger reader lock.
+func (p *PeerImpl) GetBlockByNumber(blockNumber uint64) (*pb.Block, error) {
+	p.ledgerWrapper.RLock()
+	defer p.ledgerWrapper.RUnlock()
+	return p.ledgerWrapper.ledger.GetBlockByNumber(blockNumber)
+}
+
+// GetBlockchainSize returns the height/length of the blockchain
+func (p *PeerImpl) GetBlockchainSize() uint64 {
+	p.ledgerWrapper.RLock()
+	defer p.ledgerWrapper.RUnlock()
+	return p.ledgerWrapper.ledger.GetBlockchainSize()
+}
+
+// GetCurrentStateHash returns the current non-committed hash of the in memory state
+func (p *PeerImpl) GetCurrentStateHash() (stateHash []byte, err error) {
+	p.ledgerWrapper.RLock()
+	defer p.ledgerWrapper.RUnlock()
+	return p.ledgerWrapper.ledger.GetTempStateHash()
+}
+
+// HashBlock returns the hash of the included block, useful for mocking
+// No lock is needed: the hash is computed from the block argument alone.
+func (p *PeerImpl) HashBlock(block *pb.Block) ([]byte, error) {
+	return block.GetHash()
+}
+
+// VerifyBlockchain checks the integrity of the blockchain between indices start and finish,
+// returning the first block whose PreviousBlockHash field does not match the hash of the previous block
+func (p *PeerImpl) VerifyBlockchain(start, finish uint64) (uint64, error) {
+	p.ledgerWrapper.RLock()
+	defer p.ledgerWrapper.RUnlock()
+	return p.ledgerWrapper.ledger.VerifyChain(start, finish)
+}
+
+// ApplyStateDelta applies a state delta to the current state
+// The result of this function can be retrieved using GetCurrentStateDelta
+// To commit the result, call CommitStateDelta, or to roll it back
+// call RollbackStateDelta
+// Mutating ledger operation; taken under the ledger writer lock.
+func (p *PeerImpl) ApplyStateDelta(id interface{}, delta *statemgmt.StateDelta) error {
+	p.ledgerWrapper.Lock()
+	defer p.ledgerWrapper.Unlock()
+	return p.ledgerWrapper.ledger.ApplyStateDelta(id, delta)
+}
+
+// CommitStateDelta makes the result of ApplyStateDelta permanent
+// and releases the resources necessary to rollback the delta
+func (p *PeerImpl) CommitStateDelta(id interface{}) error {
+	p.ledgerWrapper.Lock()
+	defer p.ledgerWrapper.Unlock()
+	return p.ledgerWrapper.ledger.CommitStateDelta(id)
+}
+
+// RollbackStateDelta undoes the results of ApplyStateDelta to revert
+// the current state back to the state before ApplyStateDelta was invoked
+func (p *PeerImpl) RollbackStateDelta(id interface{}) error {
+	p.ledgerWrapper.Lock()
+	defer p.ledgerWrapper.Unlock()
+	return p.ledgerWrapper.ledger.RollbackStateDelta(id)
+}
+
+// EmptyState completely empties the state and prepares it to restore a snapshot
+func (p *PeerImpl) EmptyState() error {
+	p.ledgerWrapper.Lock()
+	defer p.ledgerWrapper.Unlock()
+	return p.ledgerWrapper.ledger.DeleteALLStateKeysAndValues()
+}
+
+// GetStateSnapshot return the state snapshot
+// Read-only; reader lock suffices.
+func (p *PeerImpl) GetStateSnapshot() (*state.StateSnapshot, error) {
+	p.ledgerWrapper.RLock()
+	defer p.ledgerWrapper.RUnlock()
+	return p.ledgerWrapper.ledger.GetStateSnapshot()
+}
+
+// GetStateDelta return the state delta for the requested block number
+func (p *PeerImpl) GetStateDelta(blockNumber uint64) (*statemgmt.StateDelta, error) {
+	p.ledgerWrapper.RLock()
+	defer p.ledgerWrapper.RUnlock()
+	return p.ledgerWrapper.ledger.GetStateDelta(blockNumber)
+}
+
+// PutBlock inserts a raw block into the blockchain at the specified index, nearly no error checking is performed
+func (p *PeerImpl) PutBlock(blockNumber uint64, block *pb.Block) error {
+	p.ledgerWrapper.Lock()
+	defer p.ledgerWrapper.Unlock()
+	return p.ledgerWrapper.ledger.PutRawBlock(block, blockNumber)
+}
+
+// NewOpenchainDiscoveryHello constructs a new HelloMessage for sending
+// The HelloMessage is marshalled into a DISC_HELLO message which is then
+// signed (when security is enabled) before being returned.
+func (p *PeerImpl) NewOpenchainDiscoveryHello() (*pb.Message, error) {
+	hello, err := p.newHelloMessage()
+	if err != nil {
+		return nil, fmt.Errorf("Error getting new HelloMessage: %s", err)
+	}
+	payload, err := proto.Marshal(hello)
+	if err != nil {
+		return nil, fmt.Errorf("Error marshalling HelloMessage: %s", err)
+	}
+	// Need to sign the Discovery Hello message
+	msg := &pb.Message{Type: pb.Message_DISC_HELLO, Payload: payload, Timestamp: util.CreateUtcTimestamp()}
+	if err := p.signMessageMutating(msg); err != nil {
+		return nil, fmt.Errorf("Error signing new HelloMessage: %s", err)
+	}
+	return msg, nil
+}
+
+// GetSecHelper returns the crypto.Peer
+// Simple accessor for this peer's security helper.
+func (p *PeerImpl) GetSecHelper() crypto.Peer {
+	return p.secHelper
+}
+
+// signMessage modifies the passed in Message by setting the Signature based upon the Payload.
+// When security is disabled this is a no-op.
+func (p *PeerImpl) signMessageMutating(msg *pb.Message) error {
+	if !SecurityEnabled() {
+		return nil
+	}
+	sig, err := p.secHelper.Sign(msg.Payload)
+	if err != nil {
+		return fmt.Errorf("Error signing Openchain Message: %s", err)
+	}
+	// Set the signature in the message
+	msg.Signature = sig
+	return nil
+}
+
+// initDiscovery load the addresses from the discovery list previously saved to disk and adds them to the current discovery list
+// Returns the union of persisted addresses and any configured root nodes.
+func (p *PeerImpl) initDiscovery() []string {
+	p.discHelper = discovery.NewDiscoveryImpl()
+	p.discPersist = viper.GetBool("peer.discovery.persist")
+	if !p.discPersist {
+		peerLogger.Warning("Discovery list will not be persisted to disk")
+	}
+	addresses, err := p.LoadDiscoveryList() // load any previously saved addresses
+	if err != nil {
+		peerLogger.Errorf("%s", err)
+	}
+	for _, address := range addresses { // add them to the current discovery list
+		_ = p.discHelper.AddNode(address)
+	}
+	peerLogger.Debugf("Retrieved discovery list from disk: %v", addresses)
+	// parse the config file, ENV flags, etc.
+	rootNodes := strings.Split(viper.GetString("peer.discovery.rootnode"), ",")
+	// Idiomatic equality test (strings.Compare is documented as rarely
+	// appropriate; == is clearer and faster for equality).
+	if !(len(rootNodes) == 1 && rootNodes[0] == "") {
+		addresses = append(rootNodes, p.discHelper.GetAllNodes()...)
+	}
+	return addresses
+}
+
+// =============================================================================
+// Persistor
+// =============================================================================
+
+// Persistor enables a peer to persist and restore data to the database
+// TODO Move over the persist package from consensus down to the peer level
+type Persistor interface {
+	Store(key string, value []byte) error
+	Load(key string) ([]byte, error)
+}
+
+// Store enables a peer to persist the given key,value pair to the database
+// NOTE(review): the local `db` shadows the imported db package on the next
+// line; consider renaming the local for readability.
+func (p *PeerImpl) Store(key string, value []byte) error {
+	db := db.GetDBHandle()
+	return db.Put(db.PersistCF, []byte(key), value)
+}
+
+// Load enables a peer to read the value that corresponds to the given database key
+func (p *PeerImpl) Load(key string) ([]byte, error) {
+	db := db.GetDBHandle()
+	return db.Get(db.PersistCF, []byte(key))
+}
+
+// =============================================================================
+// Discoverer
+// =============================================================================
+
+// Discoverer enables a peer to access/persist/restore its discovery list
+type Discoverer interface {
+	DiscoveryAccessor
+	DiscoveryPersistor
+}
+
+// DiscoveryAccessor enables a peer to hand out its discovery object
+type DiscoveryAccessor interface {
+	GetDiscHelper() discovery.Discovery
+}
+
+// GetDiscHelper enables a peer to retrieve its discovery object
+// Simple accessor; discHelper is initialized in initDiscovery.
+func (p *PeerImpl) GetDiscHelper() discovery.Discovery {
+	return p.discHelper
+}
+
+// DiscoveryPersistor enables a peer to persist/restore its discovery list to/from the database
+type DiscoveryPersistor interface {
+	LoadDiscoveryList() ([]string, error)
+	StoreDiscoveryList() error
+}
+
+// StoreDiscoveryList enables a peer to persist the discovery list to the database
+func (p *PeerImpl) StoreDiscoveryList() error {
+ if !p.discPersist {
+ return nil
+ }
+ var err error
+ addresses := p.discHelper.GetAllNodes()
+ raw, err := proto.Marshal(&pb.PeersAddresses{Addresses: addresses})
+ if err != nil {
+ err = fmt.Errorf("Could not marshal discovery list message: %s", err)
+ peerLogger.Error(err)
+ return err
+ }
+ return p.Store("discovery", raw)
+}
+
+// LoadDiscoveryList enables a peer to load the discovery list from the database
+func (p *PeerImpl) LoadDiscoveryList() ([]string, error) {
+ var err error
+ packed, err := p.Load("discovery")
+ if err != nil {
+ err = fmt.Errorf("Unable to load discovery list from DB: %s", err)
+ peerLogger.Error(err)
+ return nil, err
+ }
+ addresses := &pb.PeersAddresses{}
+ err = proto.Unmarshal(packed, addresses)
+ if err != nil {
+ err = fmt.Errorf("Could not unmarshal discovery list message: %s", err)
+ peerLogger.Error(err)
+ }
+ return addresses.Addresses, err
+}
diff --git a/core/peer/peer_test.go b/core/peer/peer_test.go
new file mode 100644
index 00000000000..299a01bfdaa
--- /dev/null
+++ b/core/peer/peer_test.go
@@ -0,0 +1,124 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package peer
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/spf13/viper"
+
+ "github.com/hyperledger/fabric/core/config"
+ pb "github.com/hyperledger/fabric/protos"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+)
+
+var peerClientConn *grpc.ClientConn
+
+// TestMain sets up the test config, connects to the local peer server once,
+// and runs the suite; it exits non-zero if the connection cannot be made.
+func TestMain(m *testing.M) {
+	config.SetupTestConfig("./../../peer")
+	viper.Set("ledger.blockchain.deploy-system-chaincode", "false")
+
+	tmpConn, err := NewPeerClientConnection()
+	if err != nil {
+		// BUGFIX: the underlying error was computed but never printed, and
+		// the message read "error connection to".
+		fmt.Printf("error connecting to server at host:port = %s: %s\n", viper.GetString("peer.address"), err)
+		os.Exit(1)
+	}
+	peerClientConn = tmpConn
+	os.Exit(m.Run())
+}
+
+// TestMissingMessageHandlerUnicast verifies that Unicast fails when no
+// handler is registered for the receiver.
+func TestMissingMessageHandlerUnicast(t *testing.T) {
+	peerImpl := PeerImpl{handlerMap: &handlerMap{m: make(map[pb.PeerID]MessageHandler)}}
+	if err := peerImpl.Unicast(nil, &pb.PeerID{}); err == nil {
+		t.Error("Expected error with bad receiver handle, but there was none")
+	}
+}
+
+// performChat drives one DISC_HELLO -> DISC_GET_PEERS -> DISC_PEERS exchange
+// over the Chat stream, failing the test on unexpected stream errors or a
+// one-second timeout.
+func performChat(t testing.TB, conn *grpc.ClientConn) error {
+	serverClient := pb.NewPeerClient(conn)
+	stream, err := serverClient.Chat(context.Background())
+	if err != nil {
+		t.Logf("%v.performChat(_) = _, %v", serverClient, err)
+		return err
+	}
+	defer stream.CloseSend()
+	t.Log("Starting performChat")
+
+	waitc := make(chan struct{})
+	go func() {
+		// Be sure to close the channel
+		defer close(waitc)
+		for {
+			in, err := stream.Recv()
+			if err == io.EOF {
+				// BUGFIX: log message said "Received EOR".
+				t.Logf("Received EOF, exiting chat")
+				return
+			}
+			if err != nil {
+				t.Errorf("stream closed with unexpected error: %s", err)
+				return
+			}
+			if in.Type == pb.Message_DISC_HELLO {
+				t.Logf("Received message: %s, sending %s", in.Type, pb.Message_DISC_GET_PEERS)
+				stream.Send(&pb.Message{Type: pb.Message_DISC_GET_PEERS})
+			} else if in.Type == pb.Message_DISC_PEERS {
+				t.Logf("Received message: %s", in.Type)
+				t.Logf("Closing stream and channel")
+				return
+			} else {
+				t.Logf("Received message: %s", in.Type)
+			}
+		}
+	}()
+	select {
+	case <-waitc:
+		return nil
+	case <-time.After(1 * time.Second):
+		t.Fail()
+		return fmt.Errorf("Timeout expired while performChat")
+	}
+}
+
+// Benchmark_Chat measures sequential Chat round trips over the shared
+// connection established in TestMain.
+func Benchmark_Chat(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		performChat(b, peerClientConn)
+	}
+}
+
+// Benchmark_Chat_Parallel measures Chat round trips with 10x GOMAXPROCS
+// concurrent goroutines sharing one connection.
+func Benchmark_Chat_Parallel(b *testing.B) {
+	b.SetParallelism(10)
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			performChat(b, peerClientConn)
+		}
+	})
+}
+
+// TestServer_Chat exercises one Chat exchange against the server.
+// NOTE(review): currently disabled via t.Skip() — no reason is recorded;
+// confirm whether it can be re-enabled or should document why it is skipped.
+func TestServer_Chat(t *testing.T) {
+	t.Skip()
+	performChat(t, peerClientConn)
+}
diff --git a/core/peer/statetransfer/statetransfer.go b/core/peer/statetransfer/statetransfer.go
new file mode 100644
index 00000000000..fe77d743606
--- /dev/null
+++ b/core/peer/statetransfer/statetransfer.go
@@ -0,0 +1,848 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statetransfer
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "sort"
+ "time"
+
+ _ "github.com/hyperledger/fabric/core" // Logging format init
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/peer"
+ pb "github.com/hyperledger/fabric/protos"
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+// =============================================================================
+// init
+// =============================================================================
+
+var logger *logging.Logger // package-level logger
+
+func init() {
+	// NOTE(review): the logger name still says "consensus/statetransfer"
+	// although the package lives under core/peer/statetransfer — confirm
+	// whether the name should be updated.
+	logger = logging.MustGetLogger("consensus/statetransfer")
+}
+
+// =============================================================================
+// public methods and structure definitions
+// =============================================================================
+
+// PartialStack is a subset of peer.MessageHandlerCoordinator functionality which is necessary to perform state transfer
+type PartialStack interface {
+	peer.BlockChainAccessor
+	peer.BlockChainModifier
+	peer.BlockChainUtil
+	GetPeers() (*pb.PeersMessage, error)
+	GetPeerEndpoint() (*pb.PeerEndpoint, error)
+	GetRemoteLedger(receiver *pb.PeerID) (peer.RemoteLedger, error)
+}
+
+// Coordinator is used to initiate state transfer. Start must be called before use, and Stop should be called to free allocated resources
+type Coordinator interface {
+	Start() // Start the block transfer go routine
+	Stop() // Stop up the block transfer go routine
+
+	// SyncToTarget attempts to move the state to the given target, returning an error, and whether this target might succeed if attempted at a later time
+	// NOTE(review): the (error, bool) order is unconventional for Go (error
+	// normally last), but changing it would break implementors/callers.
+	SyncToTarget(blockNumber uint64, blockHash []byte, peerIDs []*pb.PeerID) (error, bool)
+}
+
+// coordinatorImpl is the structure used to manage the state of state transfer
+// Timeouts and limits are populated from viper config in NewCoordinatorImpl.
+type coordinatorImpl struct {
+	stack PartialStack
+
+	DiscoveryThrottleTime time.Duration // The amount of time to wait after discovering there are no connected peers
+
+	stateValid bool // Are we currently operating under the assumption that the state is valid?
+	inProgress bool // Set when state transfer is in progress so that the state may not be consistent
+
+	blockVerifyChunkSize uint64 // The max block length to attempt to sync at once, this prevents state transfer from being delayed while the blockchain is validated
+	validBlockRanges []*blockRange // Used by the block thread to track which pieces of the blockchain have already been hashed
+	RecoverDamage bool // Whether state transfer should ever modify or delete existing blocks if they are determined to be corrupted
+
+	blockSyncReq chan *blockSyncReq // Used to request a block sync, new requests cause the existing request to abort, write only from the state thread
+
+	threadExit chan struct{} // Used to inform the threads that we are shutting down
+
+	BlockRequestTimeout time.Duration // How long to wait for a peer to respond to a block request
+	StateDeltaRequestTimeout time.Duration // How long to wait for a peer to respond to a state delta request
+	StateSnapshotRequestTimeout time.Duration // How long to wait for a peer to respond to a state snapshot request
+
+	maxStateDeltas int // The maximum number of state deltas to attempt to retrieve before giving up and performing a full state snapshot retrieval
+	maxBlockRange uint64 // The maximum number blocks to attempt to retrieve at once, to prevent from overflowing the peer's buffer
+	maxStateDeltaRange uint64 // The maximum number of state deltas to attempt to retrieve at once, to prevent from overflowing the peer's buffer
+
+	currentStateBlockNumber uint64 // When state transfer does not complete successfully, the current state does not always correspond to the block height
+}
+
+// SyncToTarget consumes the calling thread and attempts to perform state transfer until success or an error occurs
+// If the peerIDs are nil, then all peers are assumed to have the given block.
+// If the call returns an error, a boolean is included which indicates if the error may be transient and the caller should retry
+func (sts *coordinatorImpl) SyncToTarget(blockNumber uint64, blockHash []byte, peerIDs []*pb.PeerID) (error, bool) {
+	logger.Debugf("Syncing to target %x for block number %d with peers %v", blockHash, blockNumber, peerIDs)
+
+	if !sts.inProgress {
+		sts.currentStateBlockNumber = sts.stack.GetBlockchainSize() - 1 // The block height is one more than the latest block number
+		sts.inProgress = true
+	}
+
+	err, recoverable := sts.attemptStateTransfer(blockNumber, peerIDs, blockHash)
+	// inProgress stays true on failure so a retry resumes from the tracked
+	// state block number rather than re-reading the chain height.
+	if err == nil {
+		sts.inProgress = false
+	}
+
+	logger.Debugf("Sync to target %x for block number %d returned, now at block height %d with err=%v recoverable=%v", blockHash, blockNumber, sts.stack.GetBlockchainSize(), err, recoverable)
+	return err, recoverable
+}
+
+// Start starts the block thread go routine
+func (sts *coordinatorImpl) Start() {
+	go sts.blockThread()
+}
+
+// Stop stops the blockthread go routine
+// Closing threadExit only if not already closed makes Stop idempotent for
+// sequential calls.
+// NOTE(review): the select/default check-then-close is not atomic; two
+// concurrent Stop calls could both reach close() and panic — confirm Stop is
+// only ever invoked from a single goroutine.
+func (sts *coordinatorImpl) Stop() {
+	select {
+	case <-sts.threadExit:
+	default:
+		close(sts.threadExit)
+	}
+}
+
+// =============================================================================
+// constructors
+// =============================================================================
+
+// NewCoordinatorImpl constructs a coordinatorImpl
+// All tunables are read from viper config; invalid or missing mandatory
+// values cause a panic at construction time so misconfiguration is loud.
+func NewCoordinatorImpl(stack PartialStack) Coordinator {
+	var err error
+	sts := &coordinatorImpl{}
+
+	sts.stack = stack
+
+	sts.RecoverDamage = viper.GetBool("statetransfer.recoverdamage")
+
+	sts.stateValid = true // Assume our starting state is correct unless told otherwise
+
+	sts.validBlockRanges = make([]*blockRange, 0)
+	sts.blockVerifyChunkSize = uint64(viper.GetInt("statetransfer.blocksperrequest"))
+	if sts.blockVerifyChunkSize == 0 {
+		panic(fmt.Errorf("Must set statetransfer.blocksperrequest to be nonzero"))
+	}
+
+	sts.blockSyncReq = make(chan *blockSyncReq)
+
+	sts.threadExit = make(chan struct{})
+
+	sts.DiscoveryThrottleTime = 1 * time.Second // TODO make this configurable
+
+	sts.BlockRequestTimeout, err = time.ParseDuration(viper.GetString("statetransfer.timeout.singleblock"))
+	if err != nil {
+		panic(fmt.Errorf("Cannot parse statetransfer.timeout.singleblock timeout: %s", err))
+	}
+	sts.StateDeltaRequestTimeout, err = time.ParseDuration(viper.GetString("statetransfer.timeout.singlestatedelta"))
+	if err != nil {
+		panic(fmt.Errorf("Cannot parse statetransfer.timeout.singlestatedelta timeout: %s", err))
+	}
+	sts.StateSnapshotRequestTimeout, err = time.ParseDuration(viper.GetString("statetransfer.timeout.fullstate"))
+	if err != nil {
+		panic(fmt.Errorf("Cannot parse statetransfer.timeout.fullstate timeout: %s", err))
+	}
+
+	sts.maxStateDeltas = viper.GetInt("statetransfer.maxdeltas")
+	if sts.maxStateDeltas <= 0 {
+		// BUGFIX: the message cited "sts.maxdeltas", which is not the config
+		// key the operator must set.
+		panic(fmt.Errorf("statetransfer.maxdeltas must be greater than 0"))
+	}
+
+	tmp := viper.GetInt("peer.sync.blocks.channelSize")
+	if tmp <= 0 {
+		panic(fmt.Errorf("peer.sync.blocks.channelSize must be greater than 0"))
+	}
+	sts.maxBlockRange = uint64(tmp)
+
+	tmp = viper.GetInt("peer.sync.state.deltas.channelSize")
+	if tmp <= 0 {
+		panic(fmt.Errorf("peer.sync.state.deltas.channelSize must be greater than 0"))
+	}
+	sts.maxStateDeltaRange = uint64(tmp)
+
+	return sts
+}
+
+// =============================================================================
+// custom interfaces and structure definitions
+// =============================================================================
+
+// blockSyncReq describes a request to the block thread to sync a span of
+// blocks; the outcome is delivered on replyChan.
+type blockSyncReq struct {
+	blockNumber uint64 // Highest block number to sync down from
+	peerIDs []*pb.PeerID // Candidate peers to sync from (nil means any)
+	reportOnBlock uint64 // Block number at which to report back on replyChan
+	replyChan chan error
+	firstBlockHash []byte // Expected hash of the block at blockNumber
+}
+
+// blockRange records a contiguous, already-verified span of the chain.
+type blockRange struct {
+	highBlock uint64 // Inclusive upper bound of the range
+	lowBlock uint64 // Inclusive lower bound of the range
+	lowNextHash []byte // PreviousBlockHash expected below lowBlock
+}
+
+// blockRangeSlice implements sort.Interface, ordering ranges so that higher
+// highBlock values come first; ties are broken by the wider range first.
+type blockRangeSlice []*blockRange
+
+func (a blockRangeSlice) Len() int {
+	return len(a)
+}
+func (a blockRangeSlice) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+func (a blockRangeSlice) Less(i, j int) bool {
+	if a[i].highBlock == a[j].highBlock {
+		// If the highs match, the bigger range comes first
+		return a[i].lowBlock < a[j].lowBlock
+	}
+	return a[i].highBlock > a[j].highBlock
+}
+
+// =============================================================================
+// helper functions for state transfer
+// =============================================================================
+
+// Executes a func trying each peer included in peerIDs until successful
+// Attempts to execute over all peers if peerIDs is nil
+// Discovery (when peerIDs is nil) selects VALIDATOR endpoints other than this
+// peer. Peers are tried starting at a random offset; the last error is
+// returned when every attempt fails.
+func (sts *coordinatorImpl) tryOverPeers(passedPeerIDs []*pb.PeerID, do func(peerID *pb.PeerID) error) (err error) {
+
+	peerIDs := passedPeerIDs
+
+	ep, err := sts.stack.GetPeerEndpoint()
+
+	if err != nil {
+		// Unless we throttle here, this condition will likely cause a tight loop which will adversely affect the rest of the system
+		time.Sleep(sts.DiscoveryThrottleTime)
+		return fmt.Errorf("Error resolving our own PeerID, this shouldn't happen")
+	}
+
+	if nil == passedPeerIDs {
+		logger.Debugf("tryOverPeers: no peerIDs given, discovering")
+
+		peersMsg, err := sts.stack.GetPeers()
+		if err != nil {
+			return fmt.Errorf("Couldn't retrieve list of peers: %v", err)
+		}
+		peers := peersMsg.GetPeers()
+		for _, endpoint := range peers {
+			if endpoint.Type == pb.PeerEndpoint_VALIDATOR {
+				// Never add ourselves as a sync source.
+				if endpoint.ID.Name == ep.ID.Name {
+					continue
+				}
+				peerIDs = append(peerIDs, endpoint.ID)
+			}
+		}
+
+		logger.Debugf("Discovered %d peerIDs", len(peerIDs))
+	}
+
+	logger.Debugf("tryOverPeers: using peerIDs: %v", peerIDs)
+
+	if 0 == len(peerIDs) {
+		logger.Errorf("Invoked tryOverPeers with no peers specified, throttling thread")
+		// Unless we throttle here, this condition will likely cause a tight loop which will adversely affect the rest of the system
+		time.Sleep(sts.DiscoveryThrottleTime)
+		return fmt.Errorf("No peers available to try over")
+	}
+
+	// Random starting index spreads load across candidate peers.
+	numReplicas := len(peerIDs)
+	startIndex := rand.Int() % numReplicas
+
+	for i := 0; i < numReplicas; i++ {
+		index := (i + startIndex) % numReplicas
+		err = do(peerIDs[index])
+		if err == nil {
+			break
+		} else {
+			logger.Warningf("tryOverPeers: loop error from %v : %s", peerIDs[index], err)
+		}
+	}
+
+	return err
+
+}
+
// Attempts to complete a blockSyncReq using the supplied peers
// Will return the last block number attempted to sync, and the last block successfully synced (or nil) and error on failure
// This means on failure, the returned block corresponds to 1 higher than the returned block number
//
// Blocks are fetched highest-first in windows of at most maxBlockRange.
// Each received block's hash is chained against the previous block's
// PreviousBlockHash, starting from highHash; any verified contiguous span
// is recorded in sts.validBlockRanges for later whole-chain validation.
func (sts *coordinatorImpl) syncBlocks(highBlock, lowBlock uint64, highHash []byte, peerIDs []*pb.PeerID) (uint64, *pb.Block, error) {
	logger.Debugf("Syncing blocks from %d to %d with head hash of %x", highBlock, lowBlock, highHash)
	validBlockHash := highHash
	blockCursor := highBlock
	var block *pb.Block
	var goodRange *blockRange

	err := sts.tryOverPeers(peerIDs, func(peerID *pb.PeerID) error {
		for {
			// blockCursor+1 acts as a sentinel meaning "compute a new fetch window"
			intermediateBlock := blockCursor + 1
			var blockChan <-chan *pb.SyncBlocks
			var err error
			for {

				if intermediateBlock == blockCursor+1 {
					if sts.maxBlockRange > blockCursor {
						// Don't underflow
						intermediateBlock = 0
					} else {
						intermediateBlock = blockCursor - sts.maxBlockRange
					}
					if intermediateBlock < lowBlock {
						intermediateBlock = lowBlock
					}
					logger.Debugf("Requesting block range from %d to %d", blockCursor, intermediateBlock)
					blockChan, err = sts.GetRemoteBlocks(peerID, blockCursor, intermediateBlock)
				}

				if nil != err {
					logger.Warningf("Failed to get blocks from %d to %d from %v: %s", blockCursor, lowBlock, peerID, err)
					return err
				}

				select {
				case syncBlockMessage, ok := <-blockChan:

					if !ok {
						return fmt.Errorf("Channel closed before we could finish reading")
					}

					if syncBlockMessage.Range.Start < syncBlockMessage.Range.End {
						// If the message is not replying with blocks backwards, we did not ask for it
						return fmt.Errorf("Received a block with wrong (increasing) order from %v, aborting", peerID)
					}

					var i int
					for i, block = range syncBlockMessage.Blocks {
						// It no longer correct to get duplication or out of range blocks, so we treat this as an error
						if syncBlockMessage.Range.Start-uint64(i) != blockCursor {
							return fmt.Errorf("Received a block out of order, indicating a buffer overflow or other corruption: start=%d, end=%d, wanted %d", syncBlockMessage.Range.Start, syncBlockMessage.Range.End, blockCursor)
						}

						testHash, err := sts.stack.HashBlock(block)
						if nil != err {
							return fmt.Errorf("Got a block %d which could not hash from %v: %s", blockCursor, peerID, err)
						}

						// The block must hash to the PreviousBlockHash of the block above it
						if !bytes.Equal(testHash, validBlockHash) {
							return fmt.Errorf("Got block %d from %v with hash %x, was expecting hash %x", blockCursor, peerID, testHash, validBlockHash)
						}

						logger.Debugf("Putting block %d to with PreviousBlockHash %x and StateHash %x", blockCursor, block.PreviousBlockHash, block.StateHash)
						if !sts.RecoverDamage {

							// If we are not supposed to be destructive in our recovery, check to make sure this block doesn't already exist
							if oldBlock, err := sts.stack.GetBlockByNumber(blockCursor); err == nil && oldBlock != nil {
								oldBlockHash, err := sts.stack.HashBlock(oldBlock)
								if nil == err {
									if !bytes.Equal(oldBlockHash, validBlockHash) {
										panic("The blockchain is corrupt and the configuration has specified that bad blocks should not be deleted/overridden")
									}
								} else {
									logger.Errorf("Could not compute the hash of block %d", blockCursor)
									panic("The blockchain is corrupt and the configuration has specified that bad blocks should not be deleted/overridden")
								}
								logger.Debugf("Not actually putting block %d to with PreviousBlockHash %x and StateHash %x, as it already exists", blockCursor, block.PreviousBlockHash, block.StateHash)
							} else {
								sts.stack.PutBlock(blockCursor, block)
							}
						} else {
							sts.stack.PutBlock(blockCursor, block)
						}

						// Extend the verified range down to the current cursor
						goodRange = &blockRange{
							highBlock:   highBlock,
							lowBlock:    blockCursor,
							lowNextHash: block.PreviousBlockHash,
						}

						validBlockHash = block.PreviousBlockHash

						if blockCursor == lowBlock {
							logger.Debugf("Successfully synced from block %d to block %d", highBlock, lowBlock)
							return nil
						}
						blockCursor--

					}
				case <-time.After(sts.BlockRequestTimeout):
					return fmt.Errorf("Had block sync request to %v time out", peerID)
				}
			}
		}
	})

	if nil != block {
		logger.Debugf("Returned from sync with block %d and state hash %x", blockCursor, block.StateHash)
	} else {
		logger.Debugf("Returned from sync with no new blocks")
	}

	if goodRange != nil {
		// goodRange is only ever set after block is assigned, so block is non-nil here
		goodRange.lowNextHash = block.PreviousBlockHash
		sts.validBlockRanges = append(sts.validBlockRanges, goodRange)
	}

	return blockCursor, block, err

}
+
+func (sts *coordinatorImpl) syncBlockchainToTarget(blockSyncReq *blockSyncReq) {
+
+ logger.Debugf("Processing a blockSyncReq to block %d", blockSyncReq.blockNumber)
+
+ blockchainSize := sts.stack.GetBlockchainSize()
+
+ if blockSyncReq.blockNumber+1 < blockchainSize {
+ if !sts.RecoverDamage {
+ panic("The blockchain height is higher than advertised by consensus, the configuration has specified that bad blocks should not be deleted/overridden, so we cannot proceed")
+ } else {
+ // TODO For now, unimplemented because we have no way to delete blocks
+ panic("Our blockchain is already higher than a sync target, this is unlikely, but unimplemented")
+ }
+ } else {
+
+ _, _, err := sts.syncBlocks(blockSyncReq.blockNumber, blockSyncReq.reportOnBlock, blockSyncReq.firstBlockHash, blockSyncReq.peerIDs)
+
+ if nil != blockSyncReq.replyChan {
+ logger.Debugf("Replying to blockSyncReq on reply channel with : %s", err)
+ blockSyncReq.replyChan <- err
+ }
+ }
+}
+
// verifyAndRecoverBlockchain performs one bounded step of validating the
// locally-stored chain against sts.validBlockRanges, merging overlapping
// ranges and re-syncing gaps as needed.  It returns true only once the
// chain is validated all the way to the genesis block; false means more
// work remains (the block thread calls it again in a loop).
func (sts *coordinatorImpl) verifyAndRecoverBlockchain() bool {

	if 0 == len(sts.validBlockRanges) {
		// No ranges known yet; seed one from the current chain head
		size := sts.stack.GetBlockchainSize()
		if 0 == size {
			logger.Warningf("No blocks in the blockchain, including the genesis block")
			return false
		}

		block, err := sts.stack.GetBlockByNumber(size - 1)
		if nil != err {
			logger.Warningf("Could not retrieve head block %d: %s", size, err)
			return false
		}

		sts.validBlockRanges = append(sts.validBlockRanges, &blockRange{
			highBlock:   size - 1,
			lowBlock:    size - 1,
			lowNextHash: block.PreviousBlockHash,
		})

	}

	// Order ranges highest-first so index 0 is the range containing the head
	sort.Sort(blockRangeSlice(sts.validBlockRanges))

	lowBlock := sts.validBlockRanges[0].lowBlock

	logger.Debugf("Validating existing blockchain, highest validated block is %d, valid through %d", sts.validBlockRanges[0].highBlock, lowBlock)

	if 1 == len(sts.validBlockRanges) {
		if 0 == lowBlock {
			// We have exactly one valid block range, and it is from 0 to at least the block height at startup, consider the chain valid
			return true
		}
	}

	lowNextHash := sts.validBlockRanges[0].lowNextHash
	targetBlock := uint64(0)

	if 1 < len(sts.validBlockRanges) {
		if sts.validBlockRanges[1].highBlock+1 >= lowBlock {
			// Ranges are not distinct (or are adjacent), we will collapse them or discard the lower if it does not chain
			if sts.validBlockRanges[1].lowBlock < lowBlock {
				// Range overlaps or is adjacent
				block, err := sts.stack.GetBlockByNumber(lowBlock - 1) // Subtraction is safe here, lowBlock > 0
				if nil != err {
					logger.Warningf("Could not retrieve block %d, believed to be valid: %s", lowBlock-1, err)
				} else {
					if blockHash, err := sts.stack.HashBlock(block); err == nil {
						if bytes.Equal(blockHash, lowNextHash) {
							// The chains connect, no need to validate all the way down
							sts.validBlockRanges[0].lowBlock = sts.validBlockRanges[1].lowBlock
							sts.validBlockRanges[0].lowNextHash = sts.validBlockRanges[1].lowNextHash
						} else {
							logger.Warningf("Detected a block range starting at %d previously believed to be valid did not hash correctly", lowBlock-1)
						}
					} else {
						logger.Warningf("Could not hash block %d, believed to be valid: %s", lowBlock-1, err)
					}
				}
			} else {
				// Range is a subset, we will simply delete
			}

			// If there was an error validating or retrieving, delete, if it was successful, delete
			// (either way entry 1 was consumed or discarded, so shift it out)
			for j := 1; j < len(sts.validBlockRanges)-1; j++ {
				sts.validBlockRanges[j] = (sts.validBlockRanges)[j+1]
			}
			sts.validBlockRanges = sts.validBlockRanges[:len(sts.validBlockRanges)-1]
			logger.Debugf("Deleted from validBlockRanges, new length %d", len(sts.validBlockRanges))
			return false
		}

		// Ranges are distinct and not adjacent
		targetBlock = sts.validBlockRanges[1].highBlock
	}

	// Verify at most blockVerifyChunkSize blocks per invocation to stay responsive
	if targetBlock+sts.blockVerifyChunkSize > lowBlock {
		// The sync range is small enough
	} else {
		// targetBlock >=0, targetBlock+blockVerifyChunkSize <= lowBlock --> lowBlock - blockVerifyChunkSize >= 0
		targetBlock = lowBlock - sts.blockVerifyChunkSize
	}

	lastGoodBlockNumber, err := sts.stack.VerifyBlockchain(lowBlock, targetBlock)

	logger.Debugf("Verified chain from %d to %d, with target of %d", lowBlock, lastGoodBlockNumber, targetBlock)

	if err != nil {
		logger.Criticalf("Something went wrong validating the blockchain, recover may be impossible: %s", err)
		return false
	}

	lastGoodBlock, err := sts.stack.GetBlockByNumber(lastGoodBlockNumber)
	if nil != err {
		logger.Errorf("Could not retrieve block %d, believed to be valid: %s", lowBlock-1, err)
		return false
	}

	// Extend the validated range down to the last verified block
	sts.validBlockRanges[0].lowBlock = lastGoodBlockNumber
	sts.validBlockRanges[0].lowNextHash = lastGoodBlock.PreviousBlockHash

	if targetBlock < lastGoodBlockNumber {
		// Verification stopped short of the target; re-sync the bad section
		sts.syncBlocks(lastGoodBlockNumber-1, targetBlock, lastGoodBlock.PreviousBlockHash, nil)
	}

	return false
}
+
// blockThread is the long-running goroutine that services block sync
// requests and, while otherwise idle, incrementally validates the local
// chain via verifyAndRecoverBlockchain.
func (sts *coordinatorImpl) blockThread() {

	// toggleOn is a closed channel, so receiving from it always succeeds
	// immediately; toggleOff is nil, so receiving from it blocks forever.
	toggleOn := make(chan struct{})
	close(toggleOn)
	var toggleOff chan struct{}

	// toggleOn causes the toggle case to always be able to be selected
	// toggleOff causes the toggle case to never be selected
	toggle := toggleOn

	for {
		select {
		case blockSyncReq := <-sts.blockSyncReq:
			sts.syncBlockchainToTarget(blockSyncReq)
			// New blocks may have arrived; re-enable background verification
			toggle = toggleOn
		case <-toggle:
			// If there is no target to sync to, make sure the rest of the chain is valid
			if !sts.verifyAndRecoverBlockchain() {
				// There is more verification to be done, so loop
				continue
			}
			logger.Infof("Validated blockchain to the genesis block")
			// Fully validated; disable this case until the next sync request
			toggle = toggleOff
		case <-sts.threadExit:
			logger.Debug("Received request for block transfer thread to exit (1)")
			return
		}
	}
}
+
// attemptStateTransfer tries to bring both the state and the blockchain up
// to blockNumber (whose hash is blockHash), using peerIDs as sync sources.
// If the current state is too stale (more than maxStateDeltas behind) or
// already marked invalid, a full snapshot is fetched first; then blocks are
// synced down to the snapshot's block, the state hash is cross-checked
// against that block, and finally state deltas are played forward.
// NOTE(review): the return order is (error, bool), reversed from Go
// convention; the bool appears to be false for exit-interruptions and an
// unsatisfiable target, true otherwise -- confirm meaning at the caller.
func (sts *coordinatorImpl) attemptStateTransfer(blockNumber uint64, peerIDs []*pb.PeerID, blockHash []byte) (error, bool) {
	var err error

	// If we are too far behind, playing deltas forward is not viable
	if sts.currentStateBlockNumber+uint64(sts.maxStateDeltas) < blockNumber {
		sts.stateValid = false
	}

	if !sts.stateValid {
		// Our state is currently bad, so get a new one
		sts.currentStateBlockNumber, err = sts.syncStateSnapshot(blockNumber, peerIDs)

		if nil != err {
			return fmt.Errorf("Could not retrieve state as recent as %d from any of specified peers", blockNumber), true
		}

		logger.Debugf("Completed state transfer to block %d", sts.currentStateBlockNumber)
	}

	// TODO, eventually we should allow lower block numbers and rewind transactions as needed
	if blockNumber < sts.currentStateBlockNumber {
		return fmt.Errorf("Cannot validate its state, because its current state corresponds to a higher block number %d than was supplied %d", sts.currentStateBlockNumber, blockNumber), false
	}

	blockReplyChannel := make(chan error)

	req := &blockSyncReq{
		blockNumber:    blockNumber,
		peerIDs:        peerIDs,
		reportOnBlock:  sts.currentStateBlockNumber,
		replyChan:      blockReplyChannel,
		firstBlockHash: blockHash,
	}

	// Hand the request to the block thread, but remain interruptible
	select {
	case sts.blockSyncReq <- req:
	case <-sts.threadExit:
		logger.Debug("Received request to exit while calling thread waiting for block sync reply")
		return fmt.Errorf("Interrupted with request to exit while waiting for block sync reply."), false
	}

	logger.Debugf("State transfer thread waiting for block sync to complete")
	select {
	case err = <-blockReplyChannel:
	case <-sts.threadExit:
		return fmt.Errorf("Interrupted while waiting for block sync reply"), false
	}
	logger.Debugf("State transfer thread continuing")

	if err != nil {
		return fmt.Errorf("Could not retrieve all blocks as recent as %d as requested: %s", blockNumber, err), true
	}

	stateHash, err := sts.stack.GetCurrentStateHash()
	if nil != err {
		sts.stateValid = false
		return fmt.Errorf("Could not compute its current state hash: %s", err), true

	}

	block, err := sts.stack.GetBlockByNumber(sts.currentStateBlockNumber)
	if err != nil {
		return fmt.Errorf("Could not get block %d though we just retrieved it: %s", sts.currentStateBlockNumber, err), true
	}

	// Cross-check the recovered state against the block it claims to match
	if !bytes.Equal(stateHash, block.StateHash) {
		if sts.stateValid {
			sts.stateValid = false
			return fmt.Errorf("Believed its state for block %d to be valid, but its hash (%x) did not match the recovered blockchain's (%x)", sts.currentStateBlockNumber, stateHash, block.StateHash), true
		}
		return fmt.Errorf("Recovered to an incorrect state at block number %d, (%x, %x)", sts.currentStateBlockNumber, stateHash, block.StateHash), true
	}

	logger.Debugf("State is now valid at block %d and hash %x", sts.currentStateBlockNumber, stateHash)

	sts.stateValid = true

	if sts.currentStateBlockNumber < blockNumber {
		// Snapshot was older than the target; replay deltas the rest of the way
		err = sts.playStateUpToBlockNumber(blockNumber, peerIDs)
		if nil != err {
			// This is unlikely, in the future, we may wish to play transactions forward rather than retry
			sts.stateValid = false
			return fmt.Errorf("Was unable to play the state from block number %d forward to block %d: %s", sts.currentStateBlockNumber, blockNumber, err), true
		}
	}

	return nil, true
}
+
// playStateUpToBlockNumber advances the committed state one block at a time
// from sts.currentStateBlockNumber up to toBlockNumber by fetching state
// deltas from the given peers (in windows of maxStateDeltaRange).  Each
// applied delta is validated against the target block's StateHash; on
// mismatch it is rolled back, and a failed rollback or commit invalidates
// the state entirely.
func (sts *coordinatorImpl) playStateUpToBlockNumber(toBlockNumber uint64, peerIDs []*pb.PeerID) error {
	logger.Debugf("Attempting to play state forward from %v to block %d", peerIDs, toBlockNumber)
	var stateHash []byte
	err := sts.tryOverPeers(peerIDs, func(peerID *pb.PeerID) error {

		var deltaMessages <-chan *pb.SyncStateDeltas
		for {

			// Request at most maxStateDeltaRange deltas per round trip
			intermediateBlock := sts.currentStateBlockNumber + 1 + sts.maxStateDeltaRange
			if intermediateBlock > toBlockNumber {
				intermediateBlock = toBlockNumber
			}
			logger.Debugf("Requesting state delta range from %d to %d", sts.currentStateBlockNumber+1, intermediateBlock)
			var err error
			deltaMessages, err = sts.GetRemoteStateDeltas(peerID, sts.currentStateBlockNumber+1, intermediateBlock)

			if err != nil {
				return fmt.Errorf("Received an error while trying to get the state deltas for blocks %d through %d from %v", sts.currentStateBlockNumber+1, intermediateBlock, peerID)
			}

			for sts.currentStateBlockNumber < intermediateBlock {
				select {
				case deltaMessage, ok := <-deltaMessages:
					if !ok {
						return fmt.Errorf("Was only able to recover to block number %d when desired to recover to %d", sts.currentStateBlockNumber, toBlockNumber)
					}

					// Deltas must arrive strictly in ascending order with no gaps
					if deltaMessage.Range.Start != sts.currentStateBlockNumber+1 || deltaMessage.Range.End < deltaMessage.Range.Start || deltaMessage.Range.End > toBlockNumber {
						return fmt.Errorf("Received a state delta from %v either in the wrong order (backwards) or not next in sequence, aborting, start=%d, end=%d", peerID, deltaMessage.Range.Start, deltaMessage.Range.End)
					}

					for _, delta := range deltaMessage.Deltas {
						umDelta := &statemgmt.StateDelta{}
						if err := umDelta.Unmarshal(delta); nil != err {
							return fmt.Errorf("Received a corrupt state delta from %v : %s", peerID, err)
						}
						sts.stack.ApplyStateDelta(deltaMessage, umDelta)

						success := false

						// Validate against the block the delta moves us to
						testBlock, err := sts.stack.GetBlockByNumber(sts.currentStateBlockNumber + 1)

						if err != nil {
							logger.Warningf("Could not retrieve block %d, though it should be present", deltaMessage.Range.End)
						} else {

							stateHash, err = sts.stack.GetCurrentStateHash()
							if err != nil {
								logger.Warningf("Could not compute state hash for some reason: %s", err)
							}
							logger.Debugf("Played state forward from %v to block %d with StateHash (%x), block has StateHash (%x)", peerID, deltaMessage.Range.End, stateHash, testBlock.StateHash)
							if bytes.Equal(testBlock.StateHash, stateHash) {
								success = true
							}
						}

						if !success {
							// Undo the bad delta; if even that fails, the state is unusable
							if sts.stack.RollbackStateDelta(deltaMessage) != nil {
								sts.stateValid = false
								return fmt.Errorf("played state forward according to %v, but the state hash did not match, failed to roll back, invalidated state", peerID)
							}
							return fmt.Errorf("Played state forward according to %v, but the state hash did not match, rolled back", peerID)

						}

						if sts.stack.CommitStateDelta(deltaMessage) != nil {
							sts.stateValid = false
							return fmt.Errorf("Played state forward according to %v, hashes matched, but failed to commit, invalidated state", peerID)
						}

						logger.Debugf("Moved state from %d to %d", sts.currentStateBlockNumber, sts.currentStateBlockNumber+1)
						sts.currentStateBlockNumber++

						if sts.currentStateBlockNumber == toBlockNumber {
							logger.Debugf("Caught up to block %d", sts.currentStateBlockNumber)
							return nil
						}
					}

				case <-time.After(sts.StateDeltaRequestTimeout):
					logger.Warningf("Timed out during state delta recovery from %v", peerID)
					return fmt.Errorf("timed out during state delta recovery from %v", peerID)
				}
			}
		}

	})
	logger.Debugf("State is now valid at block %d and hash %x", sts.currentStateBlockNumber, stateHash)
	return err
}
+
// This function will retrieve the current state from a peer.
// Note that no state verification can occur yet, we must wait for the next target, so it is important
// not to consider this state as valid
//
// Returns the block number the snapshot corresponds to (the BlockNumber of
// the last delta received) and the error from the final peer attempted
// (nil on success).
func (sts *coordinatorImpl) syncStateSnapshot(minBlockNumber uint64, peerIDs []*pb.PeerID) (uint64, error) {

	logger.Debugf("Attempting to retrieve state snapshot from %v", peerIDs)

	currentStateBlock := uint64(0)

	// NOTE(review): despite the name, 'ok' is an error value, not a bool
	ok := sts.tryOverPeers(peerIDs, func(peerID *pb.PeerID) error {
		logger.Debugf("Initiating state recovery from %v", peerID)

		// Start from a clean slate; failure to empty is logged but not fatal
		if err := sts.stack.EmptyState(); nil != err {
			logger.Errorf("Could not empty the current state: %s", err)
		}

		stateChan, err := sts.GetRemoteStateSnapshot(peerID)

		if err != nil {
			return err
		}

		// One timer bounds the entire snapshot transfer, not each piece
		timer := time.NewTimer(sts.StateSnapshotRequestTimeout)
		counter := 0

		for {
			select {
			case piece, ok := <-stateChan:
				if !ok {
					return fmt.Errorf("had state snapshot channel close prematurely after %d deltas: %s", counter, err)
				}
				// A zero-length delta marks the end of the snapshot stream
				if 0 == len(piece.Delta) {
					stateHash, err := sts.stack.GetCurrentStateHash()
					if nil != err {
						sts.stateValid = false
						return fmt.Errorf("could not compute its current state hash: %x", err)

					}

					logger.Debugf("Received final piece of state snapshot from %v after %d deltas, now has hash %x", peerID, counter, stateHash)
					return nil
				}
				umDelta := &statemgmt.StateDelta{}
				if err := umDelta.Unmarshal(piece.Delta); nil != err {
					return fmt.Errorf("received a corrupt delta from %v after %d deltas : %s", peerID, counter, err)
				}
				sts.stack.ApplyStateDelta(piece, umDelta)
				currentStateBlock = piece.BlockNumber
				if err := sts.stack.CommitStateDelta(piece); nil != err {
					return fmt.Errorf("could not commit state delta from %v after %d deltas: %s", peerID, counter, err)
				}
				counter++
			case <-timer.C:
				return fmt.Errorf("Timed out during state recovery from %v", peerID)
			}
		}

	})

	return currentStateBlock, ok
}
+
+// The below were stolen from helper.go, they should eventually be removed there, and probably made private here
+
+// GetRemoteBlocks will return a channel to stream blocks from the desired replicaID
+func (sts *coordinatorImpl) GetRemoteBlocks(replicaID *pb.PeerID, start, finish uint64) (<-chan *pb.SyncBlocks, error) {
+ remoteLedger, err := sts.stack.GetRemoteLedger(replicaID)
+ if nil != err {
+ return nil, err
+ }
+ return remoteLedger.RequestBlocks(&pb.SyncBlockRange{
+ Start: start,
+ End: finish,
+ })
+}
+
+// GetRemoteStateSnapshot will return a channel to stream a state snapshot from the desired replicaID
+func (sts *coordinatorImpl) GetRemoteStateSnapshot(replicaID *pb.PeerID) (<-chan *pb.SyncStateSnapshot, error) {
+ remoteLedger, err := sts.stack.GetRemoteLedger(replicaID)
+ if nil != err {
+ return nil, err
+ }
+ return remoteLedger.RequestStateSnapshot()
+}
+
+// GetRemoteStateDeltas will return a channel to stream a state snapshot deltas from the desired replicaID
+func (sts *coordinatorImpl) GetRemoteStateDeltas(replicaID *pb.PeerID, start, finish uint64) (<-chan *pb.SyncStateDeltas, error) {
+ remoteLedger, err := sts.stack.GetRemoteLedger(replicaID)
+ if nil != err {
+ return nil, err
+ }
+ return remoteLedger.RequestStateDeltas(&pb.SyncBlockRange{
+ Start: start,
+ End: finish,
+ })
+}
diff --git a/core/peer/statetransfer/statetransfer_mock_test.go b/core/peer/statetransfer/statetransfer_mock_test.go
new file mode 100644
index 00000000000..170e2763241
--- /dev/null
+++ b/core/peer/statetransfer/statetransfer_mock_test.go
@@ -0,0 +1,804 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statetransfer
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "reflect"
+ "sync"
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger/statemgmt"
+ "github.com/hyperledger/fabric/core/peer"
+ "github.com/hyperledger/fabric/protos"
+)
+
// mockRequest identifies which remote-ledger API a mocked call is for, so
// the test filter can target specific request types.
type mockRequest int

const (
	SyncDeltas   mockRequest = iota // a GetRemoteStateDeltas request
	SyncBlocks                      // a GetRemoteBlocks request
	SyncSnapshot                    // a GetRemoteStateSnapshot request
)
+
// mockResponse selects how a mocked request should (mis)behave.
type mockResponse int

const (
	Normal     mockResponse = iota // respond correctly
	Corrupt                        // respond with garbage data for one item
	Timeout                        // never write to the response channel
	OutOfOrder                     // respond with an adjacent (wrong) block number
)
+
+func (r mockResponse) String() string {
+ switch r {
+ case Normal:
+ return "Normal"
+ case Corrupt:
+ return "Corrupt"
+ case Timeout:
+ return "Timeout"
+ }
+
+ return "ERROR"
+}
+
// LedgerDirectory resolves a peer ID to that peer's ledger, letting one
// mock ledger serve "remote" data owned by another.
type LedgerDirectory interface {
	// GetLedgerByPeerID returns the ledger for peerID; the bool reports
	// whether the peer is known.
	GetLedgerByPeerID(peerID *protos.PeerID) (peer.BlockChainAccessor, bool)
}
+
// HashLedgerDirectory is a map-backed LedgerDirectory keyed by PeerID value.
type HashLedgerDirectory struct {
	remoteLedgers map[protos.PeerID]peer.BlockChainAccessor
}
+
+func (hd *HashLedgerDirectory) GetLedgerByPeerID(peerID *protos.PeerID) (peer.BlockChainAccessor, bool) {
+ ledger, ok := hd.remoteLedgers[*peerID]
+ return ledger, ok
+}
+
+func (hd *HashLedgerDirectory) GetPeers() (*protos.PeersMessage, error) {
+ _, network, err := hd.GetNetworkInfo()
+ return &protos.PeersMessage{Peers: network}, err
+}
+
+func (hd *HashLedgerDirectory) GetPeerEndpoint() (*protos.PeerEndpoint, error) {
+ self, _, err := hd.GetNetworkInfo()
+ return self, err
+}
+
+func (hd *HashLedgerDirectory) GetNetworkInfo() (self *protos.PeerEndpoint, network []*protos.PeerEndpoint, err error) {
+ network = make([]*protos.PeerEndpoint, len(hd.remoteLedgers)+1)
+ i := 0
+ for peerID := range hd.remoteLedgers {
+ peerID := peerID // Get a memory address which will not be overwritten
+ network[i] = &protos.PeerEndpoint{
+ ID: &peerID,
+ Type: protos.PeerEndpoint_VALIDATOR,
+ }
+ i++
+ }
+ network[i] = &protos.PeerEndpoint{
+ ID: &protos.PeerID{
+ Name: "SelfID",
+ },
+ Type: protos.PeerEndpoint_VALIDATOR,
+ }
+
+ self = network[i]
+ return
+}
+
+func (hd *HashLedgerDirectory) GetNetworkHandles() (self *protos.PeerID, network []*protos.PeerID, err error) {
+ oSelf, oNetwork, err := hd.GetNetworkInfo()
+ if nil != err {
+ return
+ }
+
+ self = oSelf.ID
+ network = make([]*protos.PeerID, len(oNetwork))
+ for i, endpoint := range oNetwork {
+ network[i] = endpoint.ID
+ }
+ return
+}
+
+const MagicDeltaKey string = "The only key/string we ever use for deltas"
+
// MockLedger is an in-memory ledger for the state transfer tests.  It
// serves both the local ledger interface and, via remoteLedger, the
// remote-peer streaming APIs, so a directory of MockLedgers can stand in
// for a whole network.
type MockLedger struct {
	cleanML       *MockLedger
	blocks        map[uint64]*protos.Block
	blockHeight   uint64
	state         uint64 // state is modelled as a single uint64 accumulator
	remoteLedgers LedgerDirectory
	// filter decides, per request type and peer, how a mocked call misbehaves
	filter func(request mockRequest, peerID *protos.PeerID) mockResponse

	mutex *sync.Mutex // guards blocks and blockHeight

	// In-flight transaction batch bookkeeping
	txID          interface{}
	curBatch      []*protos.Transaction
	curResults    []byte
	preBatchState uint64

	// In-flight state delta bookkeeping
	deltaID       interface{}
	preDeltaValue uint64

	t *testing.T
}
+
+func NewMockLedger(remoteLedgers LedgerDirectory, filter func(request mockRequest, peerID *protos.PeerID) mockResponse, t *testing.T) *MockLedger {
+ mock := &MockLedger{}
+ mock.mutex = &sync.Mutex{}
+ mock.blocks = make(map[uint64]*protos.Block)
+ mock.state = 0
+ mock.blockHeight = 0
+ mock.t = t
+
+ if nil == filter {
+ mock.filter = func(request mockRequest, peerID *protos.PeerID) mockResponse {
+ return Normal
+ }
+ } else {
+ mock.filter = filter
+ }
+
+ mock.remoteLedgers = remoteLedgers
+
+ return mock
+}
+
+func (mock *MockLedger) GetBlockchainSize() uint64 {
+ mock.mutex.Lock()
+ defer func() {
+ mock.mutex.Unlock()
+ }()
+ return mock.blockHeight
+}
+
+func (mock *MockLedger) GetBlock(id uint64) (*protos.Block, error) {
+ mock.mutex.Lock()
+ defer func() {
+ mock.mutex.Unlock()
+ }()
+ block, ok := mock.blocks[id]
+ if !ok {
+ return nil, fmt.Errorf("Block not found")
+ }
+ return block, nil
+}
+
// GetBlockByNumber returns the block at blockNumber, delegating to GetBlock.
func (mock *MockLedger) GetBlockByNumber(blockNumber uint64) (block *protos.Block, err error) {
	return mock.GetBlock(blockNumber)
}
+
// HashBlock hashes a block using the test helper SimpleHashBlock; it never
// returns an error.
func (mock *MockLedger) HashBlock(block *protos.Block) ([]byte, error) {
	return SimpleHashBlock(block), nil
}
+
// remoteLedger adapts a MockLedger to the peer.RemoteLedger request API on
// behalf of a single peer.
type remoteLedger struct {
	mockLedger *MockLedger
	peerID     *protos.PeerID
}
+
// RequestBlocks proxies a block-range request to the owning MockLedger.
func (rl *remoteLedger) RequestBlocks(rng *protos.SyncBlockRange) (<-chan *protos.SyncBlocks, error) {
	return rl.mockLedger.GetRemoteBlocks(rl.peerID, rng.Start, rng.End)
}

// RequestStateSnapshot proxies a snapshot request to the owning MockLedger.
func (rl *remoteLedger) RequestStateSnapshot() (<-chan *protos.SyncStateSnapshot, error) {
	return rl.mockLedger.GetRemoteStateSnapshot(rl.peerID)
}

// RequestStateDeltas proxies a delta-range request to the owning MockLedger.
func (rl *remoteLedger) RequestStateDeltas(rng *protos.SyncBlockRange) (<-chan *protos.SyncStateDeltas, error) {
	return rl.mockLedger.GetRemoteStateDeltas(rl.peerID, rng.Start, rng.End)
}
+
+func (mock *MockLedger) GetRemoteLedger(peerID *protos.PeerID) (peer.RemoteLedger, error) {
+ return &remoteLedger{
+ mockLedger: mock,
+ peerID: peerID,
+ }, nil
+}
+
+func (mock *MockLedger) GetRemoteBlocks(peerID *protos.PeerID, start, finish uint64) (<-chan *protos.SyncBlocks, error) {
+ rl, ok := mock.remoteLedgers.GetLedgerByPeerID(peerID)
+ if !ok {
+ return nil, fmt.Errorf("Bad peer ID %v", peerID)
+ }
+
+ var size int
+ if start > finish {
+ size = int(start - finish)
+ } else {
+ size = int(finish - start)
+ }
+
+ res := make(chan *protos.SyncBlocks, size+1) // Allows the thread to exit even if the consumer doesn't finish
+ ft := mock.filter(SyncBlocks, peerID)
+ if ft == Timeout {
+ return res, nil
+ }
+
+ go func() {
+
+ current := start
+ corruptBlock := start + (finish - start/2) // Try to pick a block in the middle, if possible
+
+ for {
+ switch {
+ case ft == Normal || (ft == Corrupt && current != corruptBlock):
+ if block, err := rl.GetBlockByNumber(current); nil == err {
+ res <- &protos.SyncBlocks{
+ Range: &protos.SyncBlockRange{
+ Start: current,
+ End: current,
+ },
+ Blocks: []*protos.Block{block},
+ }
+
+ } else {
+ fmt.Printf("TEST LEDGER: %v could not retrieve block %d : %s\n", peerID, current, err)
+ break
+ }
+ case ft == Corrupt:
+ res <- &protos.SyncBlocks{
+ Range: &protos.SyncBlockRange{
+ Start: current,
+ End: current,
+ },
+ Blocks: []*protos.Block{{
+ PreviousBlockHash: []byte("GARBAGE_BLOCK_HASH"),
+ StateHash: []byte("GARBAGE_STATE_HASH"),
+ Transactions: []*protos.Transaction{
+ {
+ Payload: []byte("GARBAGE_PAYLOAD"),
+ },
+ },
+ }},
+ }
+ case ft == OutOfOrder:
+ // Get an adjacent block, if available
+ outOfOrder := current + 1
+ block, err := rl.GetBlockByNumber(outOfOrder)
+ if err != nil {
+ outOfOrder = current - 1
+ block, err = rl.GetBlockByNumber(outOfOrder)
+ if err != nil {
+ block = &protos.Block{}
+ }
+ }
+
+ fmt.Printf("ASDF: Request block %d but sending block %d", current, outOfOrder)
+
+ res <- &protos.SyncBlocks{
+ Range: &protos.SyncBlockRange{
+ Start: outOfOrder,
+ End: outOfOrder,
+ },
+ Blocks: []*protos.Block{block},
+ }
+ default:
+ mock.t.Fatalf("Unsupported filter result %d", ft)
+ }
+
+ if current == finish {
+ break
+ }
+
+ if start < finish {
+ current++
+ } else {
+ current--
+ }
+ }
+ }()
+
+ return res, nil
+}
+
// GetRemoteStateSnapshot mocks streaming a full state snapshot from the
// ledger registered for peerID.  The snapshot is synthesized by replaying
// every state delta from block 0 to the remote head.  Per the filter:
// Timeout returns a channel that is never written, and Corrupt/OutOfOrder
// (equivalent here, since order cannot be detected) prepend one garbage
// piece before the normal stream.
func (mock *MockLedger) GetRemoteStateSnapshot(peerID *protos.PeerID) (<-chan *protos.SyncStateSnapshot, error) {

	rl, ok := mock.remoteLedgers.GetLedgerByPeerID(peerID)
	if !ok {
		return nil, fmt.Errorf("Bad peer ID %v", peerID)
	}

	remoteBlockHeight := rl.GetBlockchainSize()
	res := make(chan *protos.SyncStateSnapshot, remoteBlockHeight) // Allows the thread to exit even if the consumer doesn't finish
	ft := mock.filter(SyncSnapshot, peerID)

	if ft == Timeout {
		return res, nil
	}

	if remoteBlockHeight < 1 {
		// Nothing to stream; close so the consumer sees a clean end
		close(res)
		return res, nil
	}
	rds, err := mock.getRemoteStateDeltas(peerID, 0, remoteBlockHeight-1, SyncSnapshot)
	if nil != err {
		return nil, err
	}
	go func() {
		switch ft {
		case OutOfOrder:
			fallthrough // This is an equivalent case to corruption, as we cannot detect out of order
		case Corrupt:
			// Inject one garbage piece, then fall through to the normal stream
			res <- &protos.SyncStateSnapshot{
				Delta:       []byte("GARBAGE_DELTA"),
				Sequence:    0,
				BlockNumber: ^uint64(0),
				Request:     nil,
			}
			fallthrough
		case Normal:
			i := uint64(0)
			for deltas := range rds {
				for _, delta := range deltas.Deltas {
					res <- &protos.SyncStateSnapshot{
						Delta:       delta,
						Sequence:    i,
						BlockNumber: remoteBlockHeight - 1,
						Request:     nil,
					}
					i++
				}
				if i == remoteBlockHeight {
					break
				}
			}
			// A zero-length delta marks the end of the snapshot stream
			res <- &protos.SyncStateSnapshot{
				Delta:       []byte{},
				Sequence:    i,
				BlockNumber: ^uint64(0),
				Request:     nil,
			}
		default:
			mock.t.Fatalf("Unsupported filter result %d", ft)
		}
	}()
	return res, nil
}
+
// GetRemoteStateDeltas mocks the remote delta-streaming API, delegating to
// getRemoteStateDeltas with the SyncDeltas request type so the filter can
// distinguish delta requests from snapshot requests.
func (mock *MockLedger) GetRemoteStateDeltas(peerID *protos.PeerID, start, finish uint64) (<-chan *protos.SyncStateDeltas, error) {
	return mock.getRemoteStateDeltas(peerID, start, finish, SyncDeltas)
}
+
+func (mock *MockLedger) getRemoteStateDeltas(peerID *protos.PeerID, start, finish uint64, requestType mockRequest) (<-chan *protos.SyncStateDeltas, error) {
+ rl, ok := mock.remoteLedgers.GetLedgerByPeerID(peerID)
+
+ if !ok {
+ return nil, fmt.Errorf("Bad peer ID %v", peerID)
+ }
+
+ var size int
+ if start > finish {
+ size = int(start - finish)
+ } else {
+ size = int(finish - start)
+ }
+
+ res := make(chan *protos.SyncStateDeltas, size+1) // Allows the thread to exit even if the consumer doesn't finish
+ ft := mock.filter(requestType, peerID)
+ if ft == Timeout {
+ return res, nil
+ }
+ go func() {
+ current := start
+ corruptBlock := start + (finish - start/2) // Try to pick a block in the middle, if possible
+ for {
+ switch {
+ case ft == Normal || (ft == Corrupt && current != corruptBlock):
+ if remoteBlock, err := rl.GetBlockByNumber(current); nil == err {
+ deltas := make([][]byte, len(remoteBlock.Transactions))
+ for i, transaction := range remoteBlock.Transactions {
+ deltas[i] = SimpleBytesToStateDelta(transaction.Payload).Marshal()
+ }
+ res <- &protos.SyncStateDeltas{
+ Range: &protos.SyncBlockRange{
+ Start: current,
+ End: current,
+ },
+ Deltas: deltas,
+ }
+ } else {
+ break
+ }
+ case ft == OutOfOrder:
+ // Get an adjacent block, if available
+ outOfOrder := current + 1
+ remoteBlock, err := rl.GetBlockByNumber(outOfOrder)
+ if err != nil {
+ outOfOrder = current - 1
+ remoteBlock, err = rl.GetBlockByNumber(outOfOrder)
+ if err != nil {
+ remoteBlock = &protos.Block{}
+ }
+ }
+
+ fmt.Printf("ASDF: Request block %d but sending block %d", current, outOfOrder)
+
+ deltas := make([][]byte, len(remoteBlock.Transactions))
+ for i, transaction := range remoteBlock.Transactions {
+ deltas[i] = SimpleBytesToStateDelta(transaction.Payload).Marshal()
+ }
+ res <- &protos.SyncStateDeltas{
+ Range: &protos.SyncBlockRange{
+ Start: outOfOrder,
+ End: outOfOrder,
+ },
+ Deltas: deltas,
+ }
+
+ case ft == Corrupt:
+ deltas := [][]byte{
+ []byte("GARBAGE_DELTA"),
+ }
+ res <- &protos.SyncStateDeltas{
+ Range: &protos.SyncBlockRange{
+ Start: current,
+ End: current,
+ },
+ Deltas: deltas,
+ }
+ default:
+ mock.t.Fatalf("Unsupported filter result %d", ft)
+
+ }
+
+ if current == finish {
+ break
+ }
+
+ if start < finish {
+ current++
+ } else {
+ current--
+ }
+ }
+ }()
+ return res, nil
+}
+
+func (mock *MockLedger) PutBlock(blockNumber uint64, block *protos.Block) error {
+ mock.mutex.Lock()
+ defer func() {
+ mock.mutex.Unlock()
+ }()
+ mock.blocks[blockNumber] = block
+ if blockNumber >= mock.blockHeight {
+ mock.blockHeight = blockNumber + 1
+ }
+ return nil
+}
+
// ApplyStateDelta applies (or, when delta.RollBackwards is set, subtracts)
// a state delta to the mock's scalar state.  Only one delta id may be in
// flight at a time; the first caller's id is remembered until
// CommitStateDelta or RollbackStateDelta clears it.
func (mock *MockLedger) ApplyStateDelta(id interface{}, delta *statemgmt.StateDelta) error {
	mock.mutex.Lock()
	defer func() {
		mock.mutex.Unlock()
	}()

	if nil != mock.deltaID {
		if !reflect.DeepEqual(id, mock.deltaID) {
			return fmt.Errorf("A different state delta is already being applied")
		}
	} else {
		// First delta of a batch: remember the id and snapshot the state so
		// RollbackStateDelta can restore it later.
		mock.deltaID = id
		mock.preDeltaValue = mock.state
	}

	// The mock state is a plain uint64; the delta payload carries a uvarint
	// to add or subtract.
	d, r := binary.Uvarint(SimpleStateDeltaToBytes(delta))
	if r <= 0 {
		return fmt.Errorf("State delta could not be applied, was not a uint64, %x", d)
	}
	if !delta.RollBackwards {
		mock.state += d
	} else {
		mock.state -= d
	}
	return nil
}
+
+func (mock *MockLedger) CommitStateDelta(id interface{}) error {
+ mock.mutex.Lock()
+ defer func() {
+ mock.mutex.Unlock()
+ }()
+
+ mock.deltaID = nil
+ return nil
+}
+
+func (mock *MockLedger) RollbackStateDelta(id interface{}) error {
+ mock.mutex.Lock()
+ defer func() {
+ mock.mutex.Unlock()
+ }()
+ mock.deltaID = nil
+
+ mock.state = mock.preDeltaValue
+ return nil
+}
+
+func (mock *MockLedger) EmptyState() error {
+ mock.mutex.Lock()
+ defer func() {
+ mock.mutex.Unlock()
+ }()
+ mock.state = 0
+ return nil
+}
+
+func (mock *MockLedger) GetCurrentStateHash() ([]byte, error) {
+ mock.mutex.Lock()
+ defer func() {
+ mock.mutex.Unlock()
+ }()
+ return []byte(fmt.Sprintf("%d", mock.state)), nil
+}
+
// VerifyBlockchain walks the chain from start toward finish, checking that
// each block's PreviousBlockHash matches the hash of its successor in the
// walk.  It returns the number of the last block that verified.  Note that a
// missing block or a hash mismatch is reported with a NIL error by design —
// only failure to fetch the starting block itself is an error.
func (mock *MockLedger) VerifyBlockchain(start, finish uint64) (uint64, error) {
	current := start

	cb, err := mock.GetBlock(current)
	if nil != err {
		return current, err
	}

	for {
		if current == finish {
			return finish, nil
		}

		next := current

		// Step in whichever direction finish lies.
		if start < finish {
			next++
		} else {
			next--
		}

		nb, err := mock.GetBlock(next)

		if nil != err {
			return current, nil
		}

		nbh, err := mock.HashBlock(nb)

		if nil != err {
			return current, nil
		}

		// The chain is broken where the stored previous-hash disagrees.
		if !bytes.Equal(nbh, cb.PreviousBlockHash) {
			return current, nil
		}

		cb = nb
		current = next
	}
}
+
// MockRemoteLedger is used when the actual transaction content is irrelevant,
// useful for testing state transfer, and other situations without requiring a
// simulated network.  Blocks are synthesized on demand via SimpleGetBlock.
type MockRemoteLedger struct {
	blockHeight uint64 // number of blocks this remote ledger claims to hold
}
+
// setBlockHeight adjusts the height this mock remote ledger reports.
func (mock *MockRemoteLedger) setBlockHeight(blockHeight uint64) {
	mock.blockHeight = blockHeight
}
+
// GetBlockByNumber returns the deterministic SimpleGetBlock for blockNumber,
// or an error when the request is at or above the configured block height.
func (mock *MockRemoteLedger) GetBlockByNumber(blockNumber uint64) (block *protos.Block, err error) {
	if blockNumber >= mock.blockHeight {
		return nil, fmt.Errorf("Request block above block height")
	}
	return SimpleGetBlock(blockNumber), nil
}
+
// GetBlockchainSize reports the configured block height of this remote mock.
func (mock *MockRemoteLedger) GetBlockchainSize() uint64 {
	return mock.blockHeight
}
+
// GetCurrentStateHash reports the deterministic state value for the highest
// block, encoded as a uvarint.
// NOTE(review): mock.blockHeight-1 underflows when blockHeight is 0; tests
// appear to always set a nonzero height first — confirm before reusing.
func (mock *MockRemoteLedger) GetCurrentStateHash() (stateHash []byte, err error) {
	return SimpleEncodeUint64(SimpleGetState(mock.blockHeight - 1)), nil
}
+
+func SimpleEncodeUint64(num uint64) []byte {
+ result := make([]byte, binary.MaxVarintLen64)
+ binary.PutUvarint(result, num)
+ return result
+}
+
// SimpleHashBlock produces a cheap deterministic "hash" of a block by
// byte-summing every transaction payload into a rolling 10-byte buffer and
// combining it with the state hash and consensus metadata.  It is not a
// cryptographic hash — just stable input for the mock chain verification.
func SimpleHashBlock(block *protos.Block) []byte {
	buffer := make([]byte, binary.MaxVarintLen64)
	for _, transaction := range block.Transactions {
		for i, b := range transaction.Payload {
			buffer[i%binary.MaxVarintLen64] += b
		}
	}
	return []byte(fmt.Sprintf("BlockHash:%s-%s-%s", buffer, block.StateHash, block.ConsensusMetadata))
}
+
// SimpleGetState returns the deterministic state value for a chain of the
// given height: the triangular number blockNumber*(blockNumber+1)/2.  The
// even factor is divided first so the product cannot overflow prematurely.
func SimpleGetState(blockNumber uint64) uint64 {
	if blockNumber%2 == 0 {
		return (blockNumber / 2) * (blockNumber + 1)
	}
	return ((blockNumber + 1) / 2) * blockNumber
}
+
// SimpleGetStateDelta returns the delta payload for a block: the block
// number itself, uvarint-encoded, so summing deltas reproduces SimpleGetState.
func SimpleGetStateDelta(blockNumber uint64) []byte {
	return SimpleEncodeUint64(blockNumber)
}
+
// SimpleGetStateHash renders the deterministic state for blockNumber as its
// decimal string, matching MockLedger.GetCurrentStateHash's format.
func SimpleGetStateHash(blockNumber uint64) []byte {
	return []byte(fmt.Sprintf("%d", SimpleGetState(blockNumber)))
}
+
// SimpleGetTransactions synthesizes the single transaction carried by the
// deterministic block blockNumber; its payload is the block's state delta.
func SimpleGetTransactions(blockNumber uint64) []*protos.Transaction {
	return []*protos.Transaction{{
		Payload: SimpleGetStateDelta(blockNumber),
	}}
}
+
+func SimpleBytesToStateDelta(bDelta []byte) *statemgmt.StateDelta {
+ mDelta := &statemgmt.StateDelta{
+ RollBackwards: false,
+ }
+ mDelta.ChaincodeStateDeltas = make(map[string]*statemgmt.ChaincodeStateDelta)
+ mDelta.ChaincodeStateDeltas[MagicDeltaKey] = &statemgmt.ChaincodeStateDelta{}
+ mDelta.ChaincodeStateDeltas[MagicDeltaKey].UpdatedKVs = make(map[string]*statemgmt.UpdatedValue)
+ mDelta.ChaincodeStateDeltas[MagicDeltaKey].UpdatedKVs[MagicDeltaKey] = &statemgmt.UpdatedValue{Value: bDelta}
+ return mDelta
+}
+
// SimpleStateDeltaToBytes extracts the raw payload previously stored by
// SimpleBytesToStateDelta.  It assumes the MagicDeltaKey entries exist and
// will panic on a delta not built by its inverse.
func SimpleStateDeltaToBytes(sDelta *statemgmt.StateDelta) []byte {
	return sDelta.ChaincodeStateDeltas[MagicDeltaKey].UpdatedKVs[MagicDeltaKey].Value
}
+
// SimpleGetConsensusMetadata returns deterministic per-block metadata so the
// mock block hash varies with the block number.
func SimpleGetConsensusMetadata(blockNumber uint64) []byte {
	return []byte(fmt.Sprintf("ConsensusMetaData:%d", blockNumber))
}
+
// SimpleGetBlockHash computes the deterministic hash of block blockNumber by
// hashing the block SimpleGetBlock would synthesize.  Block number ^uint64(0)
// is the sentinel "parent of the genesis block".
func SimpleGetBlockHash(blockNumber uint64) []byte {
	if blockNumber == ^uint64(0) {
		// This occurs only when we are the genesis block
		return []byte("GenesisHash")
	}
	return SimpleHashBlock(&protos.Block{
		Transactions:      SimpleGetTransactions(blockNumber),
		ConsensusMetadata: SimpleGetConsensusMetadata(blockNumber),
		StateHash:         SimpleGetStateHash(blockNumber),
	})
}
+
// SimpleGetBlock synthesizes the deterministic block blockNumber, chained to
// its predecessor via SimpleGetBlockHash (blockNumber-1 wraps to ^uint64(0)
// for the genesis block, yielding the "GenesisHash" sentinel).
func SimpleGetBlock(blockNumber uint64) *protos.Block {
	return &protos.Block{
		Transactions:      SimpleGetTransactions(blockNumber),
		ConsensusMetadata: SimpleGetConsensusMetadata(blockNumber),
		StateHash:         SimpleGetStateHash(blockNumber),
		PreviousBlockHash: SimpleGetBlockHash(blockNumber - 1),
	}
}
+
+func TestMockLedger(t *testing.T) {
+ remoteLedgers := make(map[protos.PeerID]peer.BlockChainAccessor)
+ rl := &MockRemoteLedger{11}
+ rlPeerID := &protos.PeerID{
+ Name: "TestMockLedger",
+ }
+ remoteLedgers[*rlPeerID] = rl
+
+ ml := NewMockLedger(&HashLedgerDirectory{remoteLedgers}, nil, t)
+ ml.GetCurrentStateHash()
+
+ blockMessages, err := ml.GetRemoteBlocks(rlPeerID, 10, 0)
+
+ success := false
+
+ for blockMessage := range blockMessages {
+ current := blockMessage.Range.Start
+ i := 0
+ for {
+ _ = ml.PutBlock(current, blockMessage.Blocks[i]) // Never fails
+ i++
+
+ if current == blockMessage.Range.End {
+ break
+ }
+
+ if blockMessage.Range.Start < blockMessage.Range.End {
+ current++
+ } else {
+ current--
+ }
+ }
+ if current == 0 {
+ success = true
+ break
+ }
+ }
+
+ if !success {
+ t.Fatalf("Expected more blocks before channel close")
+ }
+
+ blockNumber, err := ml.VerifyBlockchain(10, 0)
+
+ if nil != err {
+ t.Fatalf("Retrieved blockchain did not validate at block %d with error '%s', error in mock ledger implementation.", blockNumber, err)
+ }
+
+ if blockNumber != 0 {
+ t.Fatalf("Retrieved blockchain did not validate at block %d, error in mock ledger implementation.", blockNumber)
+ }
+
+ _ = ml.PutBlock(3, &protos.Block{ // Never fails
+ PreviousBlockHash: []byte("WRONG"),
+ StateHash: []byte("WRONG"),
+ })
+
+ blockNumber, err = ml.VerifyBlockchain(10, 0)
+
+ if blockNumber != 4 {
+ t.Fatalf("Mangled blockchain did not detect the correct block with the wrong hash, error in mock ledger implementation.")
+ }
+
+ syncStateMessages, err := ml.GetRemoteStateSnapshot(rlPeerID)
+
+ if nil != err {
+ t.Fatalf("Remote state snapshot call failed, error in mock ledger implementation: %s", err)
+ }
+
+ success = false
+ _ = ml.EmptyState() // Never fails
+ for syncStateMessage := range syncStateMessages {
+ if 0 == len(syncStateMessage.Delta) {
+ success = true
+ break
+ }
+
+ delta := &statemgmt.StateDelta{}
+ if err := delta.Unmarshal(syncStateMessage.Delta); nil != err {
+ t.Fatalf("Error unmarshaling state delta : %s", err)
+ }
+
+ if err := ml.ApplyStateDelta(blockNumber, delta); err != nil {
+ t.Fatalf("Error applying state delta : %s", err)
+ }
+
+ if err := ml.CommitStateDelta(blockNumber); err != nil {
+ t.Fatalf("Error committing state delta : %s", err)
+ }
+ }
+
+ if !success {
+ t.Fatalf("Expected nil slice to finish snapshot transfer")
+ }
+
+ block10, err := ml.GetBlock(10)
+
+ if nil != err {
+ t.Fatalf("Error retrieving block 10, which we should have, error in mock ledger implementation")
+ }
+ stateHash, _ := ml.GetCurrentStateHash()
+ if !bytes.Equal(block10.StateHash, stateHash) {
+ t.Fatalf("Ledger state hash %s and block state hash %s do not match, error in mock ledger implementation", stateHash, block10.StateHash)
+ }
+}
diff --git a/core/peer/statetransfer/statetransfer_test.go b/core/peer/statetransfer/statetransfer_test.go
new file mode 100644
index 00000000000..5e72d7b94fc
--- /dev/null
+++ b/core/peer/statetransfer/statetransfer_test.go
@@ -0,0 +1,620 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statetransfer
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "sort"
+ "sync"
+ "testing"
+ "time"
+
+ configSetup "github.com/hyperledger/fabric/core/config"
+ "github.com/hyperledger/fabric/core/peer"
+ "github.com/hyperledger/fabric/protos"
+
+ "github.com/op/go-logging"
+)
+
func init() {
	// Force DEBUG logging for all modules while the tests run.
	logging.SetLevel(logging.DEBUG, "")
}
+
// AllFailures enumerates every simulated failure mode the tests iterate over.
var AllFailures = [...]mockResponse{Timeout, Corrupt, OutOfOrder}

// testPartialStack composes the mock ledger and the mock remote-ledger
// directory into the PartialStack dependency the coordinator expects.
type testPartialStack struct {
	*MockRemoteHashLedgerDirectory
	*MockLedger
}
+
// TestMain loads the shared peer test configuration before running the suite.
func TestMain(m *testing.M) {
	configSetup.SetupTestConfig("./../../../peer")
	os.Exit(m.Run())
}
+
// newPartialStack bundles a mock ledger and remote-ledger directory into a
// PartialStack for the coordinator under test.
func newPartialStack(ml *MockLedger, rld *MockRemoteHashLedgerDirectory) PartialStack {
	return &testPartialStack{
		MockLedger: ml,
		MockRemoteHashLedgerDirectory: rld,
	}
}
+
// newTestStateTransfer builds a coordinator over the mocks and starts its
// service threads; callers are responsible for calling Stop().
func newTestStateTransfer(ml *MockLedger, rld *MockRemoteHashLedgerDirectory) *coordinatorImpl {
	ci := NewCoordinatorImpl(newPartialStack(ml, rld)).(*coordinatorImpl)
	ci.Start()
	return ci
}
+
// newTestThreadlessStateTransfer builds a coordinator over the mocks without
// starting its threads, for tests that drive the sync loop manually.
func newTestThreadlessStateTransfer(ml *MockLedger, rld *MockRemoteHashLedgerDirectory) *coordinatorImpl {
	return NewCoordinatorImpl(newPartialStack(ml, rld)).(*coordinatorImpl)
}
+
// MockRemoteHashLedgerDirectory wraps HashLedgerDirectory with a convenience
// accessor that returns entries as *MockRemoteLedger.
type MockRemoteHashLedgerDirectory struct {
	*HashLedgerDirectory
}
+
// GetMockRemoteLedgerByPeerID looks up the remote ledger for peerID and
// returns it as the concrete mock type.
// NOTE(review): the lookup's ok flag is discarded, so an unknown peer yields
// a nil-interface type assertion panic — acceptable in tests, by the look of it.
func (mrls *MockRemoteHashLedgerDirectory) GetMockRemoteLedgerByPeerID(peerID *protos.PeerID) *MockRemoteLedger {
	ml, _ := mrls.GetLedgerByPeerID(peerID)
	return ml.(*MockRemoteLedger)
}
+
+func createRemoteLedgers(low, high uint64) *MockRemoteHashLedgerDirectory {
+ rols := make(map[protos.PeerID]peer.BlockChainAccessor)
+
+ for i := low; i <= high; i++ {
+ peerID := &protos.PeerID{
+ Name: fmt.Sprintf("Peer %d", i),
+ }
+ l := &MockRemoteLedger{}
+ rols[*peerID] = l
+ }
+ return &MockRemoteHashLedgerDirectory{&HashLedgerDirectory{rols}}
+}
+
// executeStateTransfer raises every remote peer's height to blockNumber+1,
// drives the coordinator to that target (retrying recoverable failures), and
// then checks that the local chain height, final block, and state hash agree.
func executeStateTransfer(sts *coordinatorImpl, ml *MockLedger, blockNumber, sequenceNumber uint64, mrls *MockRemoteHashLedgerDirectory) error {

	for peerID := range mrls.remoteLedgers {
		mrls.GetMockRemoteLedgerByPeerID(&peerID).blockHeight = blockNumber + 1
	}

	var err error

	blockHash := SimpleGetBlockHash(blockNumber)
	for i := 0; i < 100; i++ {
		var recoverable bool
		err, recoverable = sts.SyncToTarget(blockNumber, blockHash, nil)
		if err == nil || !recoverable {
			break
		}
		time.Sleep(10 * time.Millisecond)
		// Retry for up to ~1 second (100 tries x 10ms between attempts)
	}

	if err != nil {
		return err
	}

	if size := ml.GetBlockchainSize(); size != blockNumber+1 {
		return fmt.Errorf("Blockchain should be caught up to block %d, but is only %d tall", blockNumber, size)
	}

	block, err := ml.GetBlock(blockNumber)

	if nil != err {
		return fmt.Errorf("Error retrieving last block in the mock chain.")
	}

	if stateHash, _ := ml.GetCurrentStateHash(); !bytes.Equal(stateHash, block.StateHash) {
		return fmt.Errorf("Current state does not validate against the latest block.")
	}

	return nil
}
+
// filterResult records whether a simulated failure has been triggered, and
// which peer the failure was pinned to.
type filterResult struct {
	triggered bool
	peerID    *protos.PeerID
	mutex     *sync.Mutex
}

// wasTriggered reports whether the associated filter ever fired.  The value
// receiver is safe here because mutex is a pointer shared with the filter.
func (res filterResult) wasTriggered() bool {
	res.mutex.Lock()
	defer res.mutex.Unlock()
	return res.triggered
}
+
// makeSimpleFilter builds a mock-request filter that fails only requests of
// type failureTrigger, and only for the FIRST peer that issues one: that peer
// is latched into the shared filterResult so subsequent requests from the
// same peer keep failing while every other peer behaves normally.
func makeSimpleFilter(failureTrigger mockRequest, failureType mockResponse) (func(mockRequest, *protos.PeerID) mockResponse, *filterResult) {
	res := &filterResult{triggered: false, mutex: &sync.Mutex{}}
	return func(request mockRequest, peerID *protos.PeerID) mockResponse {
		//fmt.Println("Received a request", request, "for replicaId", replicaId)
		if request != failureTrigger {
			return Normal
		}

		res.mutex.Lock()
		defer res.mutex.Unlock()

		// Latch onto the first peer to issue the trigger request.
		if !res.triggered {
			res.triggered = true
			res.peerID = peerID
		}

		if *peerID == *res.peerID {
			fmt.Println("Failing it with", failureType)
			return failureType
		}
		return Normal
	}, res

}
+
// TestCatchupSimple syncs a fresh ledger holding only the genesis block up to
// block 7 with no simulated failures.
func TestCatchupSimple(t *testing.T) {
	mrls := createRemoteLedgers(1, 3)

	// Test from blockheight of 1, with valid genesis block
	ml := NewMockLedger(mrls, nil, t)
	ml.PutBlock(0, SimpleGetBlock(0))

	sts := newTestStateTransfer(ml, mrls)
	defer sts.Stop()
	if err := executeStateTransfer(sts, ml, 7, 10, mrls); nil != err {
		t.Fatalf("Simplest case: %s", err)
	}

}
+
// TestCatchupWithLowMaxDeltas constrains both the delta and block request
// ranges to 3 and checks that the sync issues exactly ceil((target-existing)/3)
// requests of each type — i.e. that the max-range limits are respected.
func TestCatchupWithLowMaxDeltas(t *testing.T) {
	mrls := createRemoteLedgers(1, 3)

	// Test from blockheight of 1, with valid genesis block
	deltasTransferred := uint64(0)
	blocksTransferred := uint64(0)
	ml := NewMockLedger(mrls, func(request mockRequest, peerID *protos.PeerID) mockResponse {
		if request == SyncDeltas {
			deltasTransferred++
		}

		if request == SyncBlocks {
			blocksTransferred++
		}

		return Normal
	}, t)
	ml.PutBlock(0, SimpleGetBlock(0))

	sts := newTestStateTransfer(ml, mrls)
	maxRange := uint64(3)
	sts.maxStateDeltaRange = maxRange
	sts.maxBlockRange = maxRange
	defer sts.Stop()

	targetBlock := uint64(7)
	if err := executeStateTransfer(sts, ml, targetBlock, 10, mrls); nil != err {
		t.Fatalf("Without deltas case: %s", err)
	}

	// ceil((targetBlock - existingBlocks) / maxRange) requests expected.
	existingBlocks := uint64(1)
	targetTransferred := (targetBlock - existingBlocks) / maxRange
	if (targetBlock-existingBlocks)%maxRange != 0 {
		targetTransferred++
	}

	if deltasTransferred != targetTransferred {
		t.Errorf("Expected %d state deltas transferred, got %d", targetTransferred, deltasTransferred)
	}

	if blocksTransferred != targetTransferred {
		t.Errorf("Expected %d state blocks transferred, got %d", targetTransferred, blocksTransferred)
	}

}
+
// TestCatchupWithoutDeltas sets maxStateDeltas to 0 so the coordinator must
// catch up via block transfer alone, and asserts that no SyncDeltas request
// is ever issued.  The block thread is run manually so its shutdown can be
// observed.
func TestCatchupWithoutDeltas(t *testing.T) {
	mrls := createRemoteLedgers(1, 3)

	deltasTransferred := false

	// Test from blockheight of 1, with valid genesis block
	ml := NewMockLedger(mrls, func(request mockRequest, peerID *protos.PeerID) mockResponse {
		if request == SyncDeltas {
			deltasTransferred = true
		}

		return Normal
	}, t)
	ml.PutBlock(0, SimpleGetBlock(0))

	sts := NewCoordinatorImpl(newPartialStack(ml, mrls)).(*coordinatorImpl)
	sts.maxStateDeltas = 0

	// Drive the block thread directly; 'done' closes when it exits.
	done := make(chan struct{})
	go func() {
		sts.blockThread()
		close(done)
	}()

	if err := executeStateTransfer(sts, ml, 7, 10, mrls); nil != err {
		t.Fatalf("Without deltas case: %s", err)
	}

	if deltasTransferred {
		t.Fatalf("State delta retrieval should not occur during this test")
	}

	sts.Stop()

	select {
	case <-done:
	case <-time.After(2 * time.Second):
		t.Fatalf("Timed out waiting for block sync to complete")
	}

	for i := uint64(0); i <= 7; i++ {
		if _, err := ml.GetBlockByNumber(i); err != nil {
			t.Errorf("Expected block %d but got error %s", i, err)
		}
	}
}
+
// TestCatchupSyncBlocksErrors runs the simple catchup once per simulated
// SyncBlocks failure mode and requires both that the sync eventually
// succeeds and that the failure was actually exercised.
// NOTE(review): the defer inside the loop keeps every coordinator alive
// until the test returns; consider wrapping each iteration in a closure.
func TestCatchupSyncBlocksErrors(t *testing.T) {
	for _, failureType := range AllFailures {
		mrls := createRemoteLedgers(1, 3)

		// Test from blockheight of 1 with valid genesis block
		// Timeouts of 10 milliseconds
		filter, result := makeSimpleFilter(SyncBlocks, failureType)
		ml := NewMockLedger(mrls, filter, t)

		ml.PutBlock(0, SimpleGetBlock(0))
		sts := newTestStateTransfer(ml, mrls)
		defer sts.Stop()
		sts.BlockRequestTimeout = 10 * time.Millisecond
		if err := executeStateTransfer(sts, ml, 7, 10, mrls); nil != err {
			t.Fatalf("SyncBlocksErrors %s case: %s", failureType, err)
		}
		if !result.wasTriggered() {
			t.Fatalf("SyncBlocksErrors case never simulated a %v", failureType)
		}
	}
}
+
+// Added for issue #676, for situations all potential sync targets fail, and sync is re-initiated, causing panic
+func TestCatchupSyncBlocksAllErrors(t *testing.T) {
+ blockNumber := uint64(10)
+
+ for _, failureType := range AllFailures {
+ mrls := createRemoteLedgers(1, 3)
+
+ // Test from blockheight of 1 with valid genesis block
+ // Timeouts of 10 milliseconds
+ succeeding := &filterResult{triggered: false, mutex: &sync.Mutex{}}
+ filter := func(request mockRequest, peerID *protos.PeerID) mockResponse {
+
+ succeeding.mutex.Lock()
+ defer succeeding.mutex.Unlock()
+ if !succeeding.triggered {
+ return failureType
+ }
+
+ return Normal
+ }
+ ml := NewMockLedger(mrls, filter, t)
+
+ ml.PutBlock(0, SimpleGetBlock(0))
+ sts := newTestStateTransfer(ml, mrls)
+ defer sts.Stop()
+ sts.BlockRequestTimeout = 10 * time.Millisecond
+
+ for peerID := range mrls.remoteLedgers {
+ mrls.GetMockRemoteLedgerByPeerID(&peerID).blockHeight = blockNumber + 1
+ }
+
+ blockHash := SimpleGetBlockHash(blockNumber)
+ if err, _ := sts.SyncToTarget(blockNumber, blockHash, nil); err == nil {
+ t.Fatalf("State transfer should not have completed yet")
+ }
+
+ succeeding.triggered = true
+ if err, _ := sts.SyncToTarget(blockNumber, blockHash, nil); err != nil {
+ t.Fatalf("Error completing state transfer")
+ }
+
+ if size := ml.GetBlockchainSize(); size != blockNumber+1 {
+ t.Fatalf("Blockchain should be caught up to block %d, but is only %d tall", blockNumber, size)
+ }
+
+ block, err := ml.GetBlock(blockNumber)
+
+ if nil != err {
+ t.Fatalf("Error retrieving last block in the mock chain.")
+ }
+
+ if stateHash, _ := ml.GetCurrentStateHash(); !bytes.Equal(stateHash, block.StateHash) {
+ t.Fatalf("Current state does not validate against the latest block.")
+ }
+ }
+}
+
// TestCatchupMissingEarlyChain starts the local ledger at block 4 (blocks
// 0-3 missing) and verifies the sync backfills the gap.
func TestCatchupMissingEarlyChain(t *testing.T) {
	mrls := createRemoteLedgers(1, 3)

	// Test from blockheight of 5 (with missing blocks 0-3)
	ml := NewMockLedger(mrls, nil, t)
	ml.PutBlock(4, SimpleGetBlock(4))
	sts := newTestStateTransfer(ml, mrls)
	defer sts.Stop()
	if err := executeStateTransfer(sts, ml, 7, 10, mrls); nil != err {
		t.Fatalf("MissingEarlyChain case: %s", err)
	}
}
+
// TestCatchupSyncSnapshotError injects each failure mode into the state
// snapshot request and requires the sync to recover.
// NOTE(review): defer inside the loop accumulates coordinators until the
// test returns.
func TestCatchupSyncSnapshotError(t *testing.T) {
	for _, failureType := range AllFailures {
		mrls := createRemoteLedgers(1, 3)

		// Test from blockheight of 5 (with missing blocks 0-3)
		// Timeouts of 1 second, also test corrupt snapshot
		filter, result := makeSimpleFilter(SyncSnapshot, failureType)
		ml := NewMockLedger(mrls, filter, t)
		ml.PutBlock(4, SimpleGetBlock(4))
		sts := newTestStateTransfer(ml, mrls)
		defer sts.Stop()
		sts.StateSnapshotRequestTimeout = 10 * time.Millisecond
		if err := executeStateTransfer(sts, ml, 7, 10, mrls); nil != err {
			t.Fatalf("SyncSnapshotError %s case: %s", failureType, err)
		}
		if !result.wasTriggered() {
			t.Fatalf("SyncSnapshotError case never simulated a %s", failureType)
		}
	}
}
+
// TestCatchupSyncDeltasError injects each failure mode into the state delta
// request (local state pre-seeded to match block 4) and requires recovery.
// NOTE(review): defer inside the loop accumulates coordinators until the
// test returns.
func TestCatchupSyncDeltasError(t *testing.T) {
	for _, failureType := range AllFailures {
		mrls := createRemoteLedgers(1, 3)

		// Test from blockheight of 5 (with missing blocks 0-3)
		// Timeouts of 1 second
		filter, result := makeSimpleFilter(SyncDeltas, failureType)
		ml := NewMockLedger(mrls, filter, t)
		ml.PutBlock(4, SimpleGetBlock(4))
		ml.state = SimpleGetState(4)
		sts := newTestStateTransfer(ml, mrls)
		defer sts.Stop()
		sts.StateDeltaRequestTimeout = 10 * time.Millisecond
		sts.StateSnapshotRequestTimeout = 10 * time.Millisecond
		if err := executeStateTransfer(sts, ml, 7, 10, mrls); nil != err {
			t.Fatalf("SyncDeltasError %s case: %s", failureType, err)
		}
		if !result.wasTriggered() {
			t.Fatalf("SyncDeltasError case never simulated a %s", failureType)
		}
	}
}
+
+func executeBlockRecovery(ml *MockLedger, millisTimeout int, mrls *MockRemoteHashLedgerDirectory) error {
+
+ sts := newTestThreadlessStateTransfer(ml, mrls)
+ sts.BlockRequestTimeout = time.Duration(millisTimeout) * time.Millisecond
+ sts.RecoverDamage = true
+
+ w := make(chan struct{})
+
+ go func() {
+ for !sts.verifyAndRecoverBlockchain() {
+ }
+ w <- struct{}{}
+ }()
+
+ select {
+ case <-time.After(time.Second * 2):
+ return fmt.Errorf("Timed out waiting for blocks to replicate for blockchain")
+ case <-w:
+ // Do nothing, continue the test
+ }
+
+ if n, err := ml.VerifyBlockchain(7, 0); 0 != n || nil != err {
+ return fmt.Errorf("Blockchain claims to be up to date, but does not verify")
+ }
+
+ return nil
+}
+
+func executeBlockRecoveryWithPanic(ml *MockLedger, millisTimeout int, mrls *MockRemoteHashLedgerDirectory) error {
+
+ sts := newTestThreadlessStateTransfer(ml, mrls)
+ sts.BlockRequestTimeout = time.Duration(millisTimeout) * time.Millisecond
+ sts.RecoverDamage = false
+
+ w := make(chan bool)
+
+ go func() {
+ defer func() {
+ recover()
+ w <- true
+ }()
+ for !sts.verifyAndRecoverBlockchain() {
+ }
+ w <- false
+ }()
+
+ select {
+ case <-time.After(time.Second * 2):
+ return fmt.Errorf("Timed out waiting for blocks to replicate for blockchain")
+ case didPanic := <-w:
+ // Do nothing, continue the test
+ if !didPanic {
+ return fmt.Errorf("Blockchain was supposed to panic on modification, but did not")
+ }
+ }
+
+ return nil
+}
+
// TestCatchupLaggingChains recovers a short (from block 7) and a long (from
// block 200) lagging chain against remotes reporting height 701.
func TestCatchupLaggingChains(t *testing.T) {
	mrls := createRemoteLedgers(0, 3)

	for peerID := range mrls.remoteLedgers {
		mrls.GetMockRemoteLedgerByPeerID(&peerID).blockHeight = 701
	}

	ml := NewMockLedger(mrls, nil, t)
	ml.PutBlock(7, SimpleGetBlock(7))
	if err := executeBlockRecovery(ml, 10, mrls); nil != err {
		t.Fatalf("TestCatchupLaggingChains short chain failure: %s", err)
	}

	ml = NewMockLedger(mrls, nil, t)
	ml.PutBlock(200, SimpleGetBlock(200))
	// Use a large timeout here because the mock ledger is slow for large blocks
	if err := executeBlockRecovery(ml, 1000, mrls); nil != err {
		t.Fatalf("TestCatchupLaggingChains long chain failure: %s", err)
	}
}
+
// TestCatchupLaggingWithSmallMaxBlocks recovers 200 missing blocks with
// maxBlockRange capped at 3 and asserts exactly ceil(200/3) SyncBlocks
// requests were issued, proving the cap is honored.
func TestCatchupLaggingWithSmallMaxBlocks(t *testing.T) {
	mrls := createRemoteLedgers(0, 3)

	for peerID := range mrls.remoteLedgers {
		mrls.GetMockRemoteLedgerByPeerID(&peerID).blockHeight = 201
	}

	maxSyncBlocks := uint64(3)
	startingBlock := uint64(200)

	syncBlockTries := uint64(0)
	ml := NewMockLedger(mrls, func(request mockRequest, peerID *protos.PeerID) mockResponse {
		if request == SyncBlocks {
			syncBlockTries++
		}

		return Normal
	}, t)
	ml.PutBlock(startingBlock, SimpleGetBlock(startingBlock))

	sts := newTestThreadlessStateTransfer(ml, mrls)
	sts.BlockRequestTimeout = 1000 * time.Millisecond
	sts.RecoverDamage = true
	sts.maxBlockRange = maxSyncBlocks
	sts.blockVerifyChunkSize = maxSyncBlocks

	w := make(chan struct{})

	go func() {
		for !sts.verifyAndRecoverBlockchain() {
		}
		w <- struct{}{}
	}()

	select {
	case <-time.After(time.Second * 2):
		t.Fatalf("Timed out waiting for blocks to replicate for blockchain")
	case <-w:
		// Do nothing, continue the test
	}

	// ceil(startingBlock / maxSyncBlocks) requests expected.
	target := startingBlock / maxSyncBlocks
	if startingBlock%maxSyncBlocks != 0 {
		target++
	}

	if syncBlockTries != target {
		t.Fatalf("Expected %d calls to sync blocks, but got %d, this indicates maxBlockRange is not being respected", target, syncBlockTries)
	}
}
+
// TestCatchupLaggingChainsErrors repeats the short lagging-chain recovery
// once per simulated SyncBlocks failure mode, requiring both success and
// that the failure mode was actually exercised.
func TestCatchupLaggingChainsErrors(t *testing.T) {
	for _, failureType := range AllFailures {
		mrls := createRemoteLedgers(0, 3)

		for peerID := range mrls.remoteLedgers {
			mrls.GetMockRemoteLedgerByPeerID(&peerID).blockHeight = 701
		}

		filter, result := makeSimpleFilter(SyncBlocks, failureType)
		ml := NewMockLedger(mrls, filter, t)
		ml.PutBlock(7, SimpleGetBlock(7))
		if err := executeBlockRecovery(ml, 10, mrls); nil != err {
			t.Fatalf("TestCatchupLaggingChainsErrors %s short chain with timeout failure: %s", failureType, err)
		}
		if !result.wasTriggered() {
			t.Fatalf("TestCatchupLaggingChainsErrors short chain with timeout never simulated a %s", failureType)
		}
	}
}
+
// TestCatchupCorruptChains plants a wrong block (block 2's content at slot 3)
// and checks that recovery repairs it when RecoverDamage is on, and panics
// when RecoverDamage is off.
func TestCatchupCorruptChains(t *testing.T) {
	mrls := createRemoteLedgers(0, 3)

	for peerID := range mrls.remoteLedgers {
		mrls.GetMockRemoteLedgerByPeerID(&peerID).blockHeight = 701
	}

	ml := NewMockLedger(mrls, nil, t)
	ml.PutBlock(7, SimpleGetBlock(7))
	ml.PutBlock(3, SimpleGetBlock(2)) // deliberately corrupt slot 3
	if err := executeBlockRecovery(ml, 10, mrls); nil != err {
		t.Fatalf("TestCatchupCorruptChains short chain failure: %s", err)
	}

	ml = NewMockLedger(mrls, nil, t)
	ml.PutBlock(7, SimpleGetBlock(7))
	ml.PutBlock(3, SimpleGetBlock(2))
	defer func() {
		//fmt.Println("Executing defer")
		// We expect a panic, this is great
		recover()
	}()
	if err := executeBlockRecoveryWithPanic(ml, 10, mrls); nil != err {
		t.Fatalf("TestCatchupCorruptChains short chain failure: %s", err)
	}
}
+
// TestBlockRangeOrdering checks the sort order of blockRangeSlice: the range
// covering the most ground sorts first, then ranges by descending position.
func TestBlockRangeOrdering(t *testing.T) {
	lowRange := &blockRange{
		highBlock: 10,
		lowBlock:  5,
	}

	highRange := &blockRange{
		highBlock: 15,
		lowBlock:  12,
	}

	bigRange := &blockRange{
		highBlock: 15,
		lowBlock:  9,
	}

	slice := blockRangeSlice([]*blockRange{lowRange, highRange, bigRange})

	sort.Sort(slice)

	if slice[0] != bigRange {
		t.Fatalf("Big range should come first")
	}

	if slice[1] != highRange {
		t.Fatalf("High range should come second")
	}

	if slice[2] != lowRange {
		t.Fatalf("Low range should come third")
	}
}
diff --git a/core/rest/api.go b/core/rest/api.go
new file mode 100644
index 00000000000..0af8f3e1e3a
--- /dev/null
+++ b/core/rest/api.go
@@ -0,0 +1,180 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+ "errors"
+ "fmt"
+ "google/protobuf"
+
+ "golang.org/x/net/context"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hyperledger/fabric/core/ledger"
+ pb "github.com/hyperledger/fabric/protos"
+ "github.com/spf13/viper"
+)
+
+var (
+ // ErrNotFound is returned if a requested resource does not exist
+ ErrNotFound = errors.New("openchain: resource not found")
+)
+
// PeerInfo defines the API for retrieving peer network data: the set of
// connected peers and this peer's own endpoint.
type PeerInfo interface {
	GetPeers() (*pb.PeersMessage, error)
	GetPeerEndpoint() (*pb.PeerEndpoint, error)
}
+
// ServerOpenchain defines the Openchain server object, which holds the
// Ledger data structure and the pointer to the peerServer.  peerInfo may be
// nil when constructed via NewOpenchainServer, in which case the peer
// endpoints cannot be queried.
type ServerOpenchain struct {
	ledger   *ledger.Ledger
	peerInfo PeerInfo
}
+
+// NewOpenchainServer creates a new instance of the ServerOpenchain.
+func NewOpenchainServer() (*ServerOpenchain, error) {
+ // Get a handle to the Ledger singleton.
+ ledger, err := ledger.GetLedger()
+ if err != nil {
+ return nil, err
+ }
+
+ s := &ServerOpenchain{ledger: ledger}
+
+ return s, nil
+}
+
+// NewOpenchainServerWithPeerInfo creates a new instance of the ServerOpenchain.
+func NewOpenchainServerWithPeerInfo(peerServer PeerInfo) (*ServerOpenchain, error) {
+ // Get a handle to the Ledger singleton.
+ ledger, err := ledger.GetLedger()
+ if err != nil {
+ return nil, err
+ }
+
+ s := &ServerOpenchain{ledger: ledger, peerInfo: peerServer}
+
+ return s, nil
+}
+
+// GetBlockchainInfo returns information about the blockchain ledger such as
+// height, current block hash, and previous block hash.
+func (s *ServerOpenchain) GetBlockchainInfo(ctx context.Context, e *google_protobuf.Empty) (*pb.BlockchainInfo, error) {
+ blockchainInfo, err := s.ledger.GetBlockchainInfo()
+ if blockchainInfo.Height == 0 {
+ return nil, fmt.Errorf("No blocks in blockchain.")
+ }
+ return blockchainInfo, err
+}
+
// GetBlockByNumber returns the data contained within a specific block in the
// blockchain. The genesis block is block zero.  Out-of-range requests map to
// ErrNotFound so REST callers can translate them to 404s.
func (s *ServerOpenchain) GetBlockByNumber(ctx context.Context, num *pb.BlockNumber) (*pb.Block, error) {
	block, err := s.ledger.GetBlockByNumber(num.Number)
	if err != nil {
		switch err {
		case ledger.ErrOutOfBounds:
			return nil, ErrNotFound
		default:
			return nil, fmt.Errorf("Error retrieving block from blockchain: %s", err)
		}
	}

	// Remove payload from deploy transactions. This is done to make rest api
	// calls more lightweight as the payload for these types of transactions
	// can be very large. If the payload is needed, the caller should fetch the
	// individual transaction.
	blockTransactions := block.GetTransactions()
	for _, transaction := range blockTransactions {
		if transaction.Type == pb.Transaction_CHAINCODE_DEPLOY {
			deploymentSpec := &pb.ChaincodeDeploymentSpec{}
			err := proto.Unmarshal(transaction.Payload, deploymentSpec)
			if err != nil {
				if !viper.GetBool("security.privacy") {
					return nil, err
				}
				//if privacy is enabled, payload is encrypted and unmarshal will
				//likely fail... given we were going to just set the CodePackage
				//to nil anyway, just recover and continue
				deploymentSpec = &pb.ChaincodeDeploymentSpec{}
			}
			// Strip the (potentially huge) code package, then re-marshal the
			// spec so the transaction remains structurally valid.
			deploymentSpec.CodePackage = nil
			deploymentSpecBytes, err := proto.Marshal(deploymentSpec)
			if err != nil {
				return nil, err
			}
			transaction.Payload = deploymentSpecBytes
		}
	}

	return block, nil
}
+
+// GetBlockCount returns the current number of blocks in the blockchain data
+// structure.
+func (s *ServerOpenchain) GetBlockCount(ctx context.Context, e *google_protobuf.Empty) (*pb.BlockCount, error) {
+ // Total number of blocks in the blockchain.
+ size := s.ledger.GetBlockchainSize()
+
+ // Check the number of blocks in the blockchain. If the blockchain is empty,
+ // return error. There will always be at least one block in the blockchain,
+ // the genesis block.
+ if size > 0 {
+ count := &pb.BlockCount{Count: size}
+ return count, nil
+ }
+
+ return nil, fmt.Errorf("No blocks in blockchain.")
+}
+
// GetState returns the value for a particular chaincode ID and key.
// NOTE(review): the hard-coded 'true' is presumably the ledger's
// "committed" flag — confirm against ledger.GetState's signature.
func (s *ServerOpenchain) GetState(ctx context.Context, chaincodeID, key string) ([]byte, error) {
	return s.ledger.GetState(chaincodeID, key, true)
}
+
// GetTransactionByUUID returns a transaction matching the specified UUID.
// A missing transaction maps to ErrNotFound so REST callers can translate it
// to a 404; any other ledger failure is wrapped as a generic error.
func (s *ServerOpenchain) GetTransactionByUUID(ctx context.Context, txUUID string) (*pb.Transaction, error) {
	transaction, err := s.ledger.GetTransactionByUUID(txUUID)
	if err != nil {
		switch err {
		case ledger.ErrResourceNotFound:
			return nil, ErrNotFound
		default:
			return nil, fmt.Errorf("Error retrieving transaction from blockchain: %s", err)
		}
	}
	return transaction, nil
}
+
// GetPeers returns a list of all peer nodes currently connected to the target
// peer.  NOTE(review): s.peerInfo is nil when the server was built with
// NewOpenchainServer — this call would panic in that configuration.
func (s *ServerOpenchain) GetPeers(ctx context.Context, e *google_protobuf.Empty) (*pb.PeersMessage, error) {
	return s.peerInfo.GetPeers()
}
+
+// GetPeerEndpoint returns PeerEndpoint info of target peer.
+func (s *ServerOpenchain) GetPeerEndpoint(ctx context.Context, e *google_protobuf.Empty) (*pb.PeersMessage, error) {
+ peers := []*pb.PeerEndpoint{}
+ peerEndpoint, err := s.peerInfo.GetPeerEndpoint()
+ if err != nil {
+ return nil, err
+ }
+ peers = append(peers, peerEndpoint)
+ peersMessage := &pb.PeersMessage{Peers: peers}
+ return peersMessage, nil
+}
diff --git a/core/rest/api_test.go b/core/rest/api_test.go
new file mode 100644
index 00000000000..05eb214c093
--- /dev/null
+++ b/core/rest/api_test.go
@@ -0,0 +1,440 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+ "bytes"
+ "fmt"
+ "google/protobuf"
+ "os"
+ "testing"
+
+ "github.com/hyperledger/fabric/core/ledger"
+ "github.com/hyperledger/fabric/core/util"
+ "github.com/hyperledger/fabric/protos"
+ "github.com/spf13/viper"
+ "golang.org/x/net/context"
+)
+
+// TestMain loads the package test configuration before running any tests.
+func TestMain(m *testing.M) {
+	setupTestConfig()
+	os.Exit(m.Run())
+}
+
+// setupTestConfig reads the rest_test config file from the current directory
+// into viper; the tests cannot run without it, so a read failure panics.
+func setupTestConfig() {
+	viper.SetConfigName("rest_test") // name of config file (without extension)
+	viper.AddConfigPath(".")         // path to look for the config file in
+	err := viper.ReadInConfig()      // Find and read the config file
+	if err != nil {                  // Handle errors reading the config file
+		panic(fmt.Errorf("Fatal error config file: %s \n", err))
+	}
+}
+
+// peerInfo is a stub implementation of the peer-info dependency used by
+// NewOpenchainServerWithPeerInfo in these tests. It fabricates a single
+// validator endpoint from the configured peer.id instead of querying real
+// connections.
+type peerInfo struct {
+}
+
+// GetPeers returns a one-element PeersMessage containing the fabricated
+// local endpoint (hard-coded address localhost:30303).
+func (p *peerInfo) GetPeers() (*protos.PeersMessage, error) {
+	peers := []*protos.PeerEndpoint{}
+	pe1 := &protos.PeerEndpoint{ID: &protos.PeerID{Name: viper.GetString("peer.id")}, Address: "localhost:30303", Type: protos.PeerEndpoint_VALIDATOR}
+	peers = append(peers, pe1)
+
+	/*
+		for _, msgHandler := range p.handlerMap.m {
+			peerEndpoint, err := msgHandler.To()
+			if err != nil {
+				return nil, fmt.Errorf("Error getting peers: %s", err)
+			}
+			peers = append(peers, &peerEndpoint)
+		}
+	*/
+	peersMessage := &protos.PeersMessage{Peers: peers}
+	return peersMessage, nil
+}
+
+// GetPeerEndpoint returns the same fabricated validator endpoint as GetPeers.
+func (p *peerInfo) GetPeerEndpoint() (*protos.PeerEndpoint, error) {
+	pe := &protos.PeerEndpoint{ID: &protos.PeerID{Name: viper.GetString("peer.id")}, Address: "localhost:30303", Type: protos.PeerEndpoint_VALIDATOR}
+	return pe, nil
+}
+
+// TestServerOpenchain_API_GetBlockchainInfo verifies that GetBlockchainInfo
+// fails on an empty chain and succeeds once blocks have been committed.
+func TestServerOpenchain_API_GetBlockchainInfo(t *testing.T) {
+	// Construct a ledger with 0 blocks.
+	// NOTE: the local variable shadows the imported ledger package for the
+	// remainder of this function.
+	ledger := ledger.InitTestLedger(t)
+	// Initialize the OpenchainServer object.
+	server, err := NewOpenchainServerWithPeerInfo(new(peerInfo))
+	if err != nil {
+		t.Logf("Error creating OpenchainServer: %s", err)
+		t.Fail()
+	}
+	// Attempt to retrieve the blockchain info. There are no blocks
+	// in this blockchain, therefore this test should intentionally fail.
+	info, err := server.GetBlockchainInfo(context.Background(), &google_protobuf.Empty{})
+	if err != nil {
+		// Success
+		t.Logf("Error retrieving blockchain info: %s", err)
+	} else {
+		// Failure
+		t.Logf("Error attempting to retrive info from emptry blockchain: %v", info)
+		t.Fail()
+	}
+
+	// add 3 blocks to ledger.
+	buildTestLedger1(ledger, t)
+	// Attempt to retrieve the blockchain info.
+	info, err = server.GetBlockchainInfo(context.Background(), &google_protobuf.Empty{})
+	if err != nil {
+		t.Logf("Error retrieving blockchain info: %s", err)
+		t.Fail()
+	} else {
+		t.Logf("Blockchain 1 info: %v", info)
+	}
+
+	// add 5 blocks more.
+	buildTestLedger2(ledger, t)
+	// Attempt to retrieve the blockchain info.
+	info, err = server.GetBlockchainInfo(context.Background(), &google_protobuf.Empty{})
+	if err != nil {
+		t.Logf("Error retrieving blockchain info: %s", err)
+		t.Fail()
+	} else {
+		t.Logf("Blockchain 2 info: %v", info)
+	}
+}
+
+// TestServerOpenchain_API_GetBlockByNumber verifies block retrieval by index:
+// an empty chain and an out-of-range index must error, while indices 0 and 2
+// of a 3-block chain must succeed.
+func TestServerOpenchain_API_GetBlockByNumber(t *testing.T) {
+	// Construct a ledger with 0 blocks.
+	ledger.InitTestLedger(t)
+
+	// Initialize the OpenchainServer object.
+	server, err := NewOpenchainServerWithPeerInfo(new(peerInfo))
+	if err != nil {
+		t.Logf("Error creating OpenchainServer: %s", err)
+		t.Fail()
+	}
+
+	// Attempt to retrieve the 0th block from the blockchain. There are no blocks
+	// in this blockchain, therefore this test should intentionally fail.
+
+	block, err := server.GetBlockByNumber(context.Background(), &protos.BlockNumber{Number: 0})
+	if err != nil {
+		// Success
+		t.Logf("Error retrieving Block from blockchain: %s", err)
+	} else {
+		// Failure
+		t.Logf("Attempting to retrieve from empty blockchain: %v", block)
+		t.Fail()
+	}
+
+	// Construct a ledger with 3 blocks.
+	ledger1 := ledger.InitTestLedger(t)
+	buildTestLedger1(ledger1, t)
+	// Point the already-constructed server at the fresh ledger.
+	server.ledger = ledger1
+
+	// Retrieve the 0th block from the blockchain.
+	block, err = server.GetBlockByNumber(context.Background(), &protos.BlockNumber{Number: 0})
+	if err != nil {
+		t.Logf("Error retrieving Block from blockchain: %s", err)
+		t.Fail()
+	} else {
+		t.Logf("Block #0: %v", block)
+	}
+
+	// Retrieve the 3rd block from the blockchain, blocks are numbered starting
+	// from 0.
+	block, err = server.GetBlockByNumber(context.Background(), &protos.BlockNumber{Number: 2})
+	if err != nil {
+		t.Logf("Error retrieving Block from blockchain: %s", err)
+		t.Fail()
+	} else {
+		t.Logf("Block #2: %v", block)
+	}
+
+	// Retrieve the 5th block from the blockchain. There are only 3 blocks in this
+	// blockchain, therefore this test should intentionally fail.
+	block, err = server.GetBlockByNumber(context.Background(), &protos.BlockNumber{Number: 4})
+	if err != nil {
+		// Success.
+		t.Logf("Error retrieving Block from blockchain: %s", err)
+	} else {
+		// Failure
+		t.Logf("Trying to retrieve non-existent block from blockchain: %v", block)
+		t.Fail()
+	}
+}
+
+// TestServerOpenchain_API_GetBlockCount verifies GetBlockCount: it must error
+// on an empty chain, report 3 after buildTestLedger1, and 8 (3 + 5) after
+// buildTestLedger2.
+func TestServerOpenchain_API_GetBlockCount(t *testing.T) {
+	// Must initialize the ledger singleton before initializing the
+	// OpenchainServer, as it needs that pointer.
+
+	// Construct a ledger with 0 blocks.
+	ledger := ledger.InitTestLedger(t)
+
+	// Initialize the OpenchainServer object.
+	server, err := NewOpenchainServerWithPeerInfo(new(peerInfo))
+	if err != nil {
+		t.Logf("Error creating OpenchainServer: %s", err)
+		t.Fail()
+	}
+
+	// Retrieve the current number of blocks in the blockchain. There are no blocks
+	// in this blockchain, therefore this test should intentionally fail.
+	count, err := server.GetBlockCount(context.Background(), &google_protobuf.Empty{})
+	if err != nil {
+		// Success
+		t.Logf("Error retrieving BlockCount from blockchain: %s", err)
+	} else {
+		// Failure
+		t.Logf("Attempting to query an empty blockchain: %v", count.Count)
+		t.Fail()
+	}
+
+	// Add three 3 blocks to ledger.
+	buildTestLedger1(ledger, t)
+	// Retrieve the current number of blocks in the blockchain. Must be 3.
+	count, err = server.GetBlockCount(context.Background(), &google_protobuf.Empty{})
+	if err != nil {
+		t.Logf("Error retrieving BlockCount from blockchain: %s", err)
+		t.Fail()
+	} else if count.Count != 3 {
+		t.Logf("Error! Blockchain must have 3 blocks!")
+		t.Fail()
+	} else {
+		t.Logf("Current BlockCount: %v", count.Count)
+	}
+
+	// Add 5 more blocks to ledger.
+	buildTestLedger2(ledger, t)
+	// Retrieve the current number of blocks in the blockchain. Must be 8 (3 + 5).
+	count, err = server.GetBlockCount(context.Background(), &google_protobuf.Empty{})
+	if err != nil {
+		t.Logf("Error retrieving BlockCount from blockchain: %s", err)
+		t.Fail()
+	} else if count.Count != 8 {
+		t.Logf("Error! Blockchain must have 8 blocks!")
+		t.Fail()
+	} else {
+		t.Logf("Current BlockCount: %v", count.Count)
+	}
+}
+
+// TestServerOpenchain_API_GetState verifies that state written by
+// buildTestLedger1 ("MyContract1"/"code") is readable through the server.
+func TestServerOpenchain_API_GetState(t *testing.T) {
+	ledger1 := ledger.InitTestLedger(t)
+	// Construct a blockchain with 3 blocks.
+	buildTestLedger1(ledger1, t)
+
+	// Initialize the OpenchainServer object.
+	server, err := NewOpenchainServerWithPeerInfo(new(peerInfo))
+	if err != nil {
+		t.Logf("Error creating OpenchainServer: %s", err)
+		t.Fail()
+	}
+
+	// Read back the value stored under chaincode "MyContract1", key "code";
+	// buildTestLedger1 sets it to "code example".
+	val, stateErr := server.GetState(context.Background(), "MyContract1", "code")
+	if stateErr != nil {
+		t.Fatalf("Error retrieving state: %s", stateErr)
+	} else if bytes.Compare(val, []byte("code example")) != 0 {
+		t.Fatalf("Expected %s, but got %s", []byte("code example"), val)
+	}
+
+}
+
+// buildTestLedger1 builds a simple ledger data structure that contains a blockchain with 3 blocks:
+// block 0 (genesis, empty), block 1 (one deploy transaction that also sets
+// "MyContract1"/"code"), and block 2 (two invoke transactions).
+func buildTestLedger1(ledger1 *ledger.Ledger, t *testing.T) {
+	// --------------------------------------------------
+	// Add the 0th (genesis block)
+	ledger1.BeginTxBatch(0)
+	err := ledger1.CommitTxBatch(0, []*protos.Transaction{}, nil, []byte("dummy-proof"))
+	if err != nil {
+		t.Fatalf("Error in commit: %s", err)
+	}
+
+	// --------------------------------------------------
+
+	// -----------------------------------------------------------------
+
+	// Deploy a contract
+	// To deploy a contract, we call the 'NewContract' function in the 'Contracts' contract
+	// TODO Use chaincode instead of contract?
+	// TODO Two types of transactions. Execute transaction, deploy/delete/update contract
+	ledger1.BeginTxBatch(1)
+	transaction1a, err := protos.NewTransaction(protos.ChaincodeID{Path: "Contracts"}, generateUUID(t), "NewContract", []string{"name: MyContract1, code: var x; function setX(json) {x = json.x}}"})
+	if err != nil {
+		t.Logf("Error creating NewTransaction: %s", err)
+		t.Fail()
+	}
+	// VM runs transaction1a and updates the global state with the result
+	// In this case, the 'Contracts' contract stores 'MyContract1' in its state
+	ledger1.TxBegin(transaction1a.Uuid)
+	ledger1.SetState("MyContract1", "code", []byte("code example"))
+	ledger1.TxFinished(transaction1a.Uuid, true)
+	// NOTE(review): this CommitTxBatch error (and the one below) is ignored,
+	// unlike the genesis commit above — consider checking it for consistency.
+	ledger1.CommitTxBatch(1, []*protos.Transaction{transaction1a}, nil, []byte("dummy-proof"))
+	// ----------------------------------------------------------------
+
+	// -----------------------------------------------------------------
+
+	ledger1.BeginTxBatch(2)
+	transaction2a, err := protos.NewTransaction(protos.ChaincodeID{Path: "MyContract"}, generateUUID(t), "setX", []string{"{x: \"hello\"}"})
+	if err != nil {
+		t.Logf("Error creating NewTransaction: %s", err)
+		t.Fail()
+	}
+	transaction2b, err := protos.NewTransaction(protos.ChaincodeID{Path: "MyOtherContract"}, generateUUID(t), "setY", []string{"{y: \"goodbuy\"}"})
+	if err != nil {
+		t.Logf("Error creating NewTransaction: %s", err)
+		t.Fail()
+	}
+
+	// Run this transaction in the VM. The VM updates the state
+	ledger1.TxBegin(transaction2a.Uuid)
+	ledger1.SetState("MyContract", "x", []byte("hello"))
+	ledger1.SetState("MyOtherContract", "y", []byte("goodbuy"))
+	ledger1.TxFinished(transaction2a.Uuid, true)
+
+	// Commit txbatch that creates the 2nd block on blockchain
+	ledger1.CommitTxBatch(2, []*protos.Transaction{transaction2a, transaction2b}, nil, []byte("dummy-proof"))
+	// ----------------------------------------------------------------
+	return
+}
+
+// buildTestLedger2 builds a simple ledger data structure that contains a blockchain
+// of 5 blocks, with each block containing the same number of transactions as its
+// index within the blockchain. Block 0, 0 transactions. Block 1, 1 transaction,
+// and so on.
+func buildTestLedger2(ledger *ledger.Ledger, t *testing.T) {
+	// --------------------------------------------------
+	// Add the 0th (genesis block)
+	// NOTE(review): all CommitTxBatch errors in this helper are ignored —
+	// consider checking them as buildTestLedger1 does for the genesis commit.
+	ledger.BeginTxBatch(0)
+	ledger.CommitTxBatch(0, []*protos.Transaction{}, nil, []byte("dummy-proof"))
+	// --------------------------------------------------
+
+	// -----------------------------------------------------------------
+
+	// Deploy a contract
+	// To deploy a contract, we call the 'NewContract' function in the 'Contracts' contract
+	// TODO Use chaincode instead of contract?
+	// TODO Two types of transactions. Execute transaction, deploy/delete/update contract
+	ledger.BeginTxBatch(1)
+	transaction1a, err := protos.NewTransaction(protos.ChaincodeID{Path: "Contracts"}, generateUUID(t), "NewContract", []string{"name: MyContract1, code: var x; function setX(json) {x = json.x}}"})
+	if err != nil {
+		t.Logf("Error creating NewTransaction: %s", err)
+		t.Fail()
+	}
+	// VM runs transaction1a and updates the global state with the result
+	// In this case, the 'Contracts' contract stores 'MyContract1' in its state
+	ledger.TxBegin(transaction1a.Uuid)
+	ledger.SetState("MyContract1", "code", []byte("code example"))
+	ledger.TxFinished(transaction1a.Uuid, true)
+	ledger.CommitTxBatch(1, []*protos.Transaction{transaction1a}, nil, []byte("dummy-proof"))
+
+	// ----------------------------------------------------------------
+
+	// -----------------------------------------------------------------
+
+	ledger.BeginTxBatch(2)
+	transaction2a, err := protos.NewTransaction(protos.ChaincodeID{Path: "MyContract"}, generateUUID(t), "setX", []string{"{x: \"hello\"}"})
+	if err != nil {
+		t.Logf("Error creating NewTransaction: %s", err)
+		t.Fail()
+	}
+	transaction2b, err := protos.NewTransaction(protos.ChaincodeID{Path: "MyOtherContract"}, generateUUID(t), "setY", []string{"{y: \"goodbuy\"}"})
+	if err != nil {
+		t.Logf("Error creating NewTransaction: %s", err)
+		t.Fail()
+	}
+
+	// Run this transaction in the VM. The VM updates the state
+	ledger.TxBegin(transaction2a.Uuid)
+	ledger.SetState("MyContract", "x", []byte("hello"))
+	ledger.SetState("MyOtherContract", "y", []byte("goodbuy"))
+	ledger.TxFinished(transaction2a.Uuid, true)
+
+	// Commit txbatch that creates the 2nd block on blockchain
+	ledger.CommitTxBatch(2, []*protos.Transaction{transaction2a, transaction2b}, nil, []byte("dummy-proof"))
+	// ----------------------------------------------------------------
+
+	// -----------------------------------------------------------------
+
+	ledger.BeginTxBatch(3)
+	transaction3a, err := protos.NewTransaction(protos.ChaincodeID{Path: "MyContract"}, generateUUID(t), "setX", []string{"{x: \"hello\"}"})
+	if err != nil {
+		t.Logf("Error creating NewTransaction: %s", err)
+		t.Fail()
+	}
+	transaction3b, err := protos.NewTransaction(protos.ChaincodeID{Path: "MyOtherContract"}, generateUUID(t), "setY", []string{"{y: \"goodbuy\"}"})
+	if err != nil {
+		t.Logf("Error creating NewTransaction: %s", err)
+		t.Fail()
+	}
+	transaction3c, err := protos.NewTransaction(protos.ChaincodeID{Path: "MyImportantContract"}, generateUUID(t), "setZ", []string{"{z: \"super\"}"})
+	if err != nil {
+		t.Logf("Error creating NewTransaction: %s", err)
+		t.Fail()
+	}
+	ledger.TxBegin(transaction3a.Uuid)
+	ledger.SetState("MyContract", "x", []byte("hello"))
+	ledger.SetState("MyOtherContract", "y", []byte("goodbuy"))
+	ledger.SetState("MyImportantContract", "z", []byte("super"))
+	ledger.TxFinished(transaction3a.Uuid, true)
+	ledger.CommitTxBatch(3, []*protos.Transaction{transaction3a, transaction3b, transaction3c}, nil, []byte("dummy-proof"))
+
+	// ----------------------------------------------------------------
+
+	// -----------------------------------------------------------------
+
+	ledger.BeginTxBatch(4)
+	// Now we want to run the function 'setX' in 'MyContract
+
+	// Create a transaction'
+	transaction4a, err := protos.NewTransaction(protos.ChaincodeID{Path: "MyContract"}, generateUUID(t), "setX", []string{"{x: \"hello\"}"})
+	if err != nil {
+		t.Logf("Error creating NewTransaction: %s", err)
+		t.Fail()
+	}
+	transaction4b, err := protos.NewTransaction(protos.ChaincodeID{Path: "MyOtherContract"}, generateUUID(t), "setY", []string{"{y: \"goodbuy\"}"})
+	if err != nil {
+		t.Logf("Error creating NewTransaction: %s", err)
+		t.Fail()
+	}
+	transaction4c, err := protos.NewTransaction(protos.ChaincodeID{Path: "MyImportantContract"}, generateUUID(t), "setZ", []string{"{z: \"super\"}"})
+	if err != nil {
+		t.Logf("Error creating NewTransaction: %s", err)
+		t.Fail()
+	}
+	transaction4d, err := protos.NewTransaction(protos.ChaincodeID{Path: "MyMEGAContract"}, generateUUID(t), "setMEGA", []string{"{mega: \"MEGA\"}"})
+	if err != nil {
+		t.Logf("Error creating NewTransaction: %s", err)
+		t.Fail()
+	}
+
+	// Run this transaction in the VM. The VM updates the state
+	ledger.TxBegin(transaction4a.Uuid)
+	ledger.SetState("MyContract", "x", []byte("hello"))
+	ledger.SetState("MyOtherContract", "y", []byte("goodbuy"))
+	ledger.SetState("MyImportantContract", "z", []byte("super"))
+	ledger.SetState("MyMEGAContract", "mega", []byte("MEGA"))
+	ledger.TxFinished(transaction4a.Uuid, true)
+
+	// Commit txbatch that creates block 4 (the 5th block) and add it to the chain
+	ledger.CommitTxBatch(4, []*protos.Transaction{transaction4a, transaction4b, transaction4c, transaction4d}, nil, []byte("dummy-proof"))
+	// ----------------------------------------------------------------
+
+	return
+}
+
+// generateUUID returns a fresh UUID string for test transactions. The *testing.T
+// parameter is currently unused; it is kept so call sites could report
+// generation failures in the future.
+func generateUUID(t *testing.T) string {
+	return util.GenerateUUID()
+}
diff --git a/core/rest/rest_api.go b/core/rest/rest_api.go
new file mode 100644
index 00000000000..0668a49dbf3
--- /dev/null
+++ b/core/rest/rest_api.go
@@ -0,0 +1,1778 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "google/protobuf"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/context"
+
+ "github.com/gocraft/web"
+ "github.com/golang/protobuf/jsonpb"
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+
+ core "github.com/hyperledger/fabric/core"
+ "github.com/hyperledger/fabric/core/chaincode"
+ "github.com/hyperledger/fabric/core/comm"
+ "github.com/hyperledger/fabric/core/crypto"
+ "github.com/hyperledger/fabric/core/crypto/primitives"
+ pb "github.com/hyperledger/fabric/protos"
+)
+
+// restLogger is the package-level logger for the REST service.
+var restLogger = logging.MustGetLogger("rest")
+
+// serverOpenchain is a variable that holds the pointer to the
+// underlying ServerOpenchain object. serverDevops is a variable that holds
+// the pointer to the underlying Devops object. This is necessary due to
+// how the gocraft/web package implements context initialization.
+var serverOpenchain *ServerOpenchain
+var serverDevops pb.DevopsServer
+
+// ServerOpenchainREST defines the Openchain REST service object. It exposes
+// the methods available on the ServerOpenchain service and the Devops service
+// through a REST API.
+type ServerOpenchainREST struct {
+	server *ServerOpenchain
+	devops pb.DevopsServer
+}
+
+// restResult defines the response payload for a general REST interface request.
+// Exactly one of OK or Error is expected to be populated per response.
+type restResult struct {
+	OK    string `json:",omitempty"`
+	Error string `json:",omitempty"`
+}
+
+// tcertsResult defines the response payload for the GetTransactionCert REST
+// interface request.
+type tcertsResult struct {
+	OK []string
+}
+
+// rpcRequest defines the JSON RPC 2.0 request payload for the /chaincode endpoint.
+type rpcRequest struct {
+	Jsonrpc *string           `json:"jsonrpc,omitempty"`
+	Method  *string           `json:"method,omitempty"`
+	Params  *pb.ChaincodeSpec `json:"params,omitempty"`
+	ID      *rpcID            `json:"id,omitempty"`
+}
+
+// rpcID models the JSON-RPC 2.0 "id" member, which may be either a string or
+// a number; exactly one of the two fields is set (see the custom JSON
+// marshalling methods below).
+type rpcID struct {
+	StringValue *string
+	IntValue    *int64
+}
+
+// UnmarshalJSON decodes a JSON-RPC id, trying a string first and then an
+// int64; whichever succeeds populates the corresponding field. Any other
+// JSON value (float, object, array, null) is rejected.
+func (id *rpcID) UnmarshalJSON(b []byte) error {
+	var err error
+	s, n := "", int64(0)
+
+	if err = json.Unmarshal(b, &s); err == nil {
+		id.StringValue = &s
+		return nil
+	}
+	if err = json.Unmarshal(b, &n); err == nil {
+		id.IntValue = &n
+		return nil
+	}
+	return fmt.Errorf("cannot unmarshal %s into Go value of type int64 or string", string(b))
+}
+
+// MarshalJSON encodes whichever id variant is set, preferring the string
+// form if both are non-nil; an entirely empty rpcID is an error.
+func (id *rpcID) MarshalJSON() ([]byte, error) {
+	if id.StringValue != nil {
+		return json.Marshal(id.StringValue)
+	}
+	if id.IntValue != nil {
+		return json.Marshal(id.IntValue)
+	}
+	return nil, errors.New("cannot marshal rpcID")
+}
+
+// rpcResponse defines the JSON RPC 2.0 response payload for the /chaincode endpoint.
+type rpcResponse struct {
+	Jsonrpc string     `json:"jsonrpc,omitempty"`
+	Result  *rpcResult `json:"result,omitempty"`
+	Error   *rpcError  `json:"error,omitempty"`
+	ID      *rpcID     `json:"id"`
+}
+
+// rpcResult defines the structure for an rpc success/error result message.
+type rpcResult struct {
+	Status  string    `json:"status,omitempty"`
+	Message string    `json:"message,omitempty"`
+	Error   *rpcError `json:"error,omitempty"`
+}
+
+// rpcError defines the structure for an rpc error.
+type rpcError struct {
+	// A Number that indicates the error type that occurred. This MUST be an integer.
+	Code int64 `json:"code,omitempty"`
+	// A String providing a short description of the error. The message SHOULD be
+	// limited to a concise single sentence.
+	Message string `json:"message,omitempty"`
+	// A Primitive or Structured value that contains additional information about
+	// the error. This may be omitted. The value of this member is defined by the
+	// Server (e.g. detailed error information, nested errors etc.).
+	Data string `json:"data,omitempty"`
+}
+
+// JSON RPC 2.0 errors and messages.
+var (
+	// Pre-defined errors and messages (codes per the JSON-RPC 2.0 spec).
+	ParseError     = &rpcError{Code: -32700, Message: "Parse error", Data: "Invalid JSON was received by the server. An error occurred on the server while parsing the JSON text."}
+	InvalidRequest = &rpcError{Code: -32600, Message: "Invalid request", Data: "The JSON sent is not a valid Request object."}
+	MethodNotFound = &rpcError{Code: -32601, Message: "Method not found", Data: "The method does not exist / is not available."}
+	InvalidParams  = &rpcError{Code: -32602, Message: "Invalid params", Data: "Invalid method parameter(s)."}
+	InternalError  = &rpcError{Code: -32603, Message: "Internal error", Data: "Internal JSON-RPC error."}
+
+	// -32000 to -32099 - Server error. Reserved for implementation-defined server-errors.
+	MissingRegistrationError = &rpcError{Code: -32000, Message: "Registration missing", Data: "User not logged in. Use the '/registrar' endpoint to obtain a security token."}
+	ChaincodeDeployError     = &rpcError{Code: -32001, Message: "Deployment failure", Data: "Chaincode deployment has failed."}
+	ChaincodeInvokeError     = &rpcError{Code: -32002, Message: "Invocation failure", Data: "Chaincode invocation has failed."}
+	ChaincodeQueryError      = &rpcError{Code: -32003, Message: "Query failure", Data: "Chaincode query has failed."}
+)
+
+// SetOpenchainServer is a middleware function that sets the pointer to the
+// underlying ServerOpenchain object and the underlying Devops object.
+// It copies the package-level singletons into the per-request context.
+func (s *ServerOpenchainREST) SetOpenchainServer(rw web.ResponseWriter, req *web.Request, next web.NextMiddlewareFunc) {
+	s.server = serverOpenchain
+	s.devops = serverDevops
+
+	next(rw, req)
+}
+
+// SetResponseType is a middleware function that sets the appropriate response
+// headers. Currently, it is setting the "Content-Type" to "application/json" as
+// well as the necessary headers in order to enable CORS for Swagger usage.
+func (s *ServerOpenchainREST) SetResponseType(rw web.ResponseWriter, req *web.Request, next web.NextMiddlewareFunc) {
+	rw.Header().Set("Content-Type", "application/json")
+
+	// Enable CORS
+	rw.Header().Set("Access-Control-Allow-Origin", "*")
+	rw.Header().Set("Access-Control-Allow-Headers", "accept, content-type")
+
+	next(rw, req)
+}
+
+// getRESTFilePath is a helper function to retrieve the local storage directory
+// of client login tokens. It returns peer.fileSystemPath + "client/", always
+// with a trailing slash.
+func getRESTFilePath() string {
+	localStore := viper.GetString("peer.fileSystemPath")
+	if !strings.HasSuffix(localStore, "/") {
+		localStore = localStore + "/"
+	}
+	localStore = localStore + "client/"
+	return localStore
+}
+
+// isEnrollmentIDValid returns true if the given enrollmentID matches the valid
+// pattern defined in the configuration (rest.validPatterns.enrollmentID).
+// A missing pattern is treated as a configuration error, not as "anything goes".
+func isEnrollmentIDValid(enrollmentID string) (bool, error) {
+	pattern := viper.GetString("rest.validPatterns.enrollmentID")
+	if pattern == "" {
+		return false, errors.New("Missing configuration key rest.validPatterns.enrollmentID")
+	}
+	return regexp.MatchString(pattern, enrollmentID)
+}
+
+// validateEnrollmentIDParameter checks whether the given enrollmentID is
+// valid: if valid, returns true and does nothing; if not, writes the HTTP
+// error response (500 on config error, 400 on pattern mismatch) and returns
+// false so the caller can simply return.
+func validateEnrollmentIDParameter(rw web.ResponseWriter, enrollmentID string) bool {
+	validID, err := isEnrollmentIDValid(enrollmentID)
+	if err != nil {
+		rw.WriteHeader(http.StatusInternalServerError)
+		json.NewEncoder(rw).Encode(restResult{Error: err.Error()})
+		restLogger.Errorf("Error when validating enrollment ID: %s", err)
+		return false
+	}
+	if !validID {
+		rw.WriteHeader(http.StatusBadRequest)
+		json.NewEncoder(rw).Encode(restResult{Error: "Invalid enrollment ID parameter"})
+		restLogger.Errorf("Invalid enrollment ID parameter '%s'.\n", enrollmentID)
+		return false
+	}
+
+	return true
+}
+
+// Register confirms the enrollmentID and secret password of the client with the
+// CA and stores the enrollment certificate and key in the Devops server.
+// On success a loginToken_<id> file is written under the client store; a
+// pre-existing token short-circuits with 200 OK.
+func (s *ServerOpenchainREST) Register(rw web.ResponseWriter, req *web.Request) {
+	restLogger.Info("REST client login...")
+	encoder := json.NewEncoder(rw)
+
+	// Decode the incoming JSON payload
+	var loginSpec pb.Secret
+	err := jsonpb.Unmarshal(req.Body, &loginSpec)
+
+	// Check for proper JSON syntax
+	if err != nil {
+		// Client must supply payload
+		if err == io.EOF {
+			rw.WriteHeader(http.StatusBadRequest)
+			encoder.Encode(restResult{Error: "Payload must contain object Secret with enrollId and enrollSecret fields."})
+			restLogger.Error("Error: Payload must contain object Secret with enrollId and enrollSecret fields.")
+		} else {
+			rw.WriteHeader(http.StatusBadRequest)
+			encoder.Encode(restResult{Error: err.Error()})
+			restLogger.Errorf("Error: %s", err)
+		}
+
+		return
+	}
+
+	// Check that the enrollId and enrollSecret are not left blank.
+	if (loginSpec.EnrollId == "") || (loginSpec.EnrollSecret == "") {
+		rw.WriteHeader(http.StatusBadRequest)
+		encoder.Encode(restResult{Error: "enrollId and enrollSecret may not be blank."})
+		restLogger.Error("Error: enrollId and enrollSecret may not be blank.")
+
+		return
+	}
+
+	if !validateEnrollmentIDParameter(rw, loginSpec.EnrollId) {
+		return
+	}
+
+	// Retrieve the REST data storage path
+	// Returns /var/hyperledger/production/client/
+	localStore := getRESTFilePath()
+	restLogger.Infof("Local data store for client loginToken: %s", localStore)
+
+	// If the user is already logged in, return
+	if _, err := os.Stat(localStore + "loginToken_" + loginSpec.EnrollId); err == nil {
+		rw.WriteHeader(http.StatusOK)
+		encoder.Encode(restResult{OK: fmt.Sprintf("User %s is already logged in.", loginSpec.EnrollId)})
+		restLogger.Infof("User '%s' is already logged in.\n", loginSpec.EnrollId)
+
+		return
+	}
+
+	// User is not logged in, proceed with login
+	restLogger.Infof("Logging in user '%s' on REST interface...\n", loginSpec.EnrollId)
+
+	loginResult, err := s.devops.Login(context.Background(), &loginSpec)
+
+	// NOTE(review): err from Login is never checked; if Login can return
+	// (nil, err), the Status dereference below panics. Confirm the Devops
+	// contract and add an explicit err check if needed.
+	// Check if login is successful
+	if loginResult.Status == pb.Response_SUCCESS {
+		// If /var/hyperledger/production/client/ directory does not exist, create it
+		if _, err := os.Stat(localStore); err != nil {
+			if os.IsNotExist(err) {
+				// Directory does not exist, create it
+				if err := os.Mkdir(localStore, 0755); err != nil {
+					rw.WriteHeader(http.StatusInternalServerError)
+					encoder.Encode(restResult{Error: fmt.Sprintf("Fatal error -- %s", err)})
+					panic(fmt.Errorf("Fatal error when creating %s directory: %s\n", localStore, err))
+				}
+			} else {
+				// Unexpected error
+				rw.WriteHeader(http.StatusInternalServerError)
+				encoder.Encode(restResult{Error: fmt.Sprintf("Fatal error -- %s", err)})
+				panic(fmt.Errorf("Fatal error on os.Stat of %s directory: %s\n", localStore, err))
+			}
+		}
+
+		// Store client security context into a file
+		restLogger.Infof("Storing login token for user '%s'.\n", loginSpec.EnrollId)
+		err = ioutil.WriteFile(localStore+"loginToken_"+loginSpec.EnrollId, []byte(loginSpec.EnrollId), 0755)
+		if err != nil {
+			rw.WriteHeader(http.StatusInternalServerError)
+			encoder.Encode(restResult{Error: fmt.Sprintf("Fatal error -- %s", err)})
+			panic(fmt.Errorf("Fatal error when storing client login token: %s\n", err))
+		}
+
+		rw.WriteHeader(http.StatusOK)
+		encoder.Encode(restResult{OK: fmt.Sprintf("Login successful for user '%s'.", loginSpec.EnrollId)})
+		restLogger.Infof("Login successful for user '%s'.\n", loginSpec.EnrollId)
+	} else {
+		rw.WriteHeader(http.StatusUnauthorized)
+		encoder.Encode(restResult{Error: string(loginResult.Msg)})
+		restLogger.Errorf("Error on client login: %s", string(loginResult.Msg))
+	}
+
+	return
+}
+
+// GetEnrollmentID checks whether a given user has already registered with the
+// Devops server. Login status is determined solely by the presence of the
+// user's loginToken_<id> file in the client store.
+func (s *ServerOpenchainREST) GetEnrollmentID(rw web.ResponseWriter, req *web.Request) {
+	// Parse out the user enrollment ID
+	enrollmentID := req.PathParams["id"]
+
+	if !validateEnrollmentIDParameter(rw, enrollmentID) {
+		return
+	}
+
+	// Retrieve the REST data storage path
+	// Returns /var/hyperledger/production/client/
+	localStore := getRESTFilePath()
+
+	encoder := json.NewEncoder(rw)
+
+	// If the user is already logged in, return OK. Otherwise return error.
+	if _, err := os.Stat(localStore + "loginToken_" + enrollmentID); err == nil {
+		rw.WriteHeader(http.StatusOK)
+		encoder.Encode(restResult{OK: fmt.Sprintf("User %s is already logged in.", enrollmentID)})
+		restLogger.Infof("User '%s' is already logged in.\n", enrollmentID)
+	} else {
+		rw.WriteHeader(http.StatusUnauthorized)
+		encoder.Encode(restResult{Error: fmt.Sprintf("User %s must log in.", enrollmentID)})
+		restLogger.Infof("User '%s' must log in.\n", enrollmentID)
+	}
+}
+
+// DeleteEnrollmentID removes the login token of the specified user from the
+// Devops server. Once the login token is removed, the specified user will no
+// longer be able to transact without logging in again. On the REST interface,
+// this method may be used as a means of logging out an active client.
+// Deleting a user that is not logged in is treated as success (idempotent).
+func (s *ServerOpenchainREST) DeleteEnrollmentID(rw web.ResponseWriter, req *web.Request) {
+	// Parse out the user enrollment ID
+	enrollmentID := req.PathParams["id"]
+
+	if !validateEnrollmentIDParameter(rw, enrollmentID) {
+		return
+	}
+
+	// Retrieve the REST data storage path
+	// Returns /var/hyperledger/production/client/
+	localStore := getRESTFilePath()
+
+	// Construct the path to the login token and to the directory containing the
+	// cert and key.
+	// /var/hyperledger/production/client/loginToken_username
+	loginTok := localStore + "loginToken_" + enrollmentID
+	// /var/hyperledger/production/crypto/client/username
+	cryptoDir := viper.GetString("peer.fileSystemPath") + "/crypto/client/" + enrollmentID
+
+	// Stat both paths to determine if the user is currently logged in
+	_, err1 := os.Stat(loginTok)
+	_, err2 := os.Stat(cryptoDir)
+
+	encoder := json.NewEncoder(rw)
+
+	// If the user is not logged in, nothing to delete. Return OK.
+	if os.IsNotExist(err1) && os.IsNotExist(err2) {
+		rw.WriteHeader(http.StatusOK)
+		encoder.Encode(restResult{OK: fmt.Sprintf("User %s is not logged in.", enrollmentID)})
+		restLogger.Infof("User '%s' is not logged in.\n", enrollmentID)
+
+		return
+	}
+
+	// The user is logged in, delete the user's login token
+	if err := os.RemoveAll(loginTok); err != nil {
+		rw.WriteHeader(http.StatusInternalServerError)
+		encoder.Encode(restResult{Error: fmt.Sprintf("Error trying to delete login token for user %s: %s", enrollmentID, err)})
+		restLogger.Errorf("Error: Error trying to delete login token for user %s: %s", enrollmentID, err)
+
+		return
+	}
+
+	// The user is logged in, delete the user's cert and key directory
+	if err := os.RemoveAll(cryptoDir); err != nil {
+		rw.WriteHeader(http.StatusInternalServerError)
+		encoder.Encode(restResult{Error: fmt.Sprintf("Error trying to delete login directory for user %s: %s", enrollmentID, err)})
+		restLogger.Errorf("Error: Error trying to delete login directory for user %s: %s", enrollmentID, err)
+
+		return
+	}
+
+	rw.WriteHeader(http.StatusOK)
+	encoder.Encode(restResult{OK: fmt.Sprintf("Deleted login token and directory for user %s.", enrollmentID)})
+	restLogger.Infof("Deleted login token and directory for user %s.\n", enrollmentID)
+
+	return
+}
+
+// GetEnrollmentCert retrieves the enrollment certificate for a given user.
+//
+// The certificate is returned PEM encoded and then URL encoded (the PEM
+// payload contains newline characters), wrapped in a restResult JSON object.
+// Security must be enabled on the peer or the request is rejected with 400.
+func (s *ServerOpenchainREST) GetEnrollmentCert(rw web.ResponseWriter, req *web.Request) {
+	// Parse out the user enrollment ID
+	enrollmentID := req.PathParams["id"]
+
+	if !validateEnrollmentIDParameter(rw, enrollmentID) {
+		return
+	}
+
+	restLogger.Debugf("REST received enrollment certificate retrieval request for registrationID '%s'", enrollmentID)
+
+	encoder := json.NewEncoder(rw)
+
+	// If security is enabled, initialize the crypto client
+	if core.SecurityEnabled() {
+		if restLogger.IsEnabledFor(logging.DEBUG) {
+			restLogger.Debugf("Initializing secure client using context '%s'", enrollmentID)
+		}
+
+		// Initialize the security client
+		sec, err := crypto.InitClient(enrollmentID, nil)
+		if err != nil {
+			rw.WriteHeader(http.StatusBadRequest)
+			encoder.Encode(restResult{Error: err.Error()})
+			restLogger.Errorf("Error: %s", err)
+
+			return
+		}
+
+		// Release the security client on every exit path. Previously it was
+		// closed only on the success path, leaking it on each error return.
+		defer crypto.CloseClient(sec)
+
+		// Obtain the client CertificateHandler
+		handler, err := sec.GetEnrollmentCertificateHandler()
+		if err != nil {
+			rw.WriteHeader(http.StatusInternalServerError)
+			encoder.Encode(restResult{Error: err.Error()})
+			restLogger.Errorf("Error: %s", err)
+
+			return
+		}
+
+		// Certificate handler can not be nil
+		if handler == nil {
+			rw.WriteHeader(http.StatusInternalServerError)
+			encoder.Encode(restResult{Error: "Error retrieving certificate handler."})
+			restLogger.Errorf("Error: Error retrieving certificate handler.")
+
+			return
+		}
+
+		// Obtain the DER encoded certificate
+		certDER := handler.GetCertificate()
+
+		// Confirm the retrieved enrollment certificate is not nil
+		if certDER == nil {
+			rw.WriteHeader(http.StatusInternalServerError)
+			encoder.Encode(restResult{Error: "Enrollment certificate is nil."})
+			restLogger.Errorf("Error: Enrollment certificate is nil.")
+
+			return
+		}
+
+		// Confirm the retrieved enrollment certificate has non-zero length
+		if len(certDER) == 0 {
+			rw.WriteHeader(http.StatusInternalServerError)
+			encoder.Encode(restResult{Error: "Enrollment certificate length is 0."})
+			restLogger.Errorf("Error: Enrollment certificate length is 0.")
+
+			return
+		}
+
+		// Transforms the DER encoded certificate to a PEM encoded certificate
+		certPEM := primitives.DERCertToPEM(certDER)
+
+		// As the enrollment certificate contains \n characters, url encode it before outputting
+		urlEncodedCert := url.QueryEscape(string(certPEM))
+
+		rw.WriteHeader(http.StatusOK)
+		encoder.Encode(restResult{OK: urlEncodedCert})
+		restLogger.Debugf("Successfully retrieved enrollment certificate for secure context '%s'", enrollmentID)
+	} else {
+		// Security must be enabled to request enrollment certificates
+		rw.WriteHeader(http.StatusBadRequest)
+		encoder.Encode(restResult{Error: "Security functionality must be enabled before requesting client certificates."})
+		restLogger.Errorf("Error: Security functionality must be enabled before requesting client certificates.")
+
+		return
+	}
+}
+
+// GetTransactionCert retrieves the transaction certificate(s) for a given user.
+//
+// The optional "count" query parameter selects how many TCerts to return
+// (default 1, capped at 500). Each certificate is PEM encoded and then URL
+// encoded before being placed in the returned JSON array. Security must be
+// enabled on the peer or the request is rejected with 400.
+func (s *ServerOpenchainREST) GetTransactionCert(rw web.ResponseWriter, req *web.Request) {
+	// Parse out the user enrollment ID
+	enrollmentID := req.PathParams["id"]
+
+	if !validateEnrollmentIDParameter(rw, enrollmentID) {
+		return
+	}
+
+	restLogger.Debugf("REST received transaction certificate retrieval request for registrationID '%s'", enrollmentID)
+
+	encoder := json.NewEncoder(rw)
+
+	// Parse out the count query parameter. A parse failure simply leaves the
+	// form empty, so count falls back to its default below.
+	req.ParseForm()
+	queryParams := req.Form
+
+	// The default number of TCerts to retrieve is 1
+	var count uint32 = 1
+
+	// If the query parameter is present, examine the supplied value
+	if queryParams["count"] != nil {
+		// Convert string to uint. The parse function return the widest type (uint64)
+		// Setting base to 32 allows you to subsequently cast the value to uint32
+		qParam, err := strconv.ParseUint(queryParams["count"][0], 10, 32)
+
+		// Check for count parameter being a non-negative integer
+		if err != nil {
+			rw.WriteHeader(http.StatusBadRequest)
+			encoder.Encode(restResult{Error: "Count query parameter must be a non-negative integer."})
+			restLogger.Errorf("Error: Count query parameter must be a non-negative integer.")
+
+			return
+		}
+
+		// If the query parameter is within the allowed range, record it
+		if qParam > 0 && qParam <= 500 {
+			count = uint32(qParam)
+		}
+
+		// Limit the number of TCerts retrieved to 500
+		if qParam > 500 {
+			count = 500
+		}
+	}
+
+	// If security is enabled, initialize the crypto client
+	if core.SecurityEnabled() {
+		if restLogger.IsEnabledFor(logging.DEBUG) {
+			restLogger.Debugf("Initializing secure client using context '%s'", enrollmentID)
+		}
+
+		// Initialize the security client
+		sec, err := crypto.InitClient(enrollmentID, nil)
+		if err != nil {
+			rw.WriteHeader(http.StatusBadRequest)
+			encoder.Encode(restResult{Error: err.Error()})
+			restLogger.Errorf("Error: %s", err)
+
+			return
+		}
+
+		// Release the security client on every exit path. Previously it was
+		// closed only on the success path, leaking it on each error return.
+		defer crypto.CloseClient(sec)
+
+		// Obtain the client CertificateHandler
+		// TODO - Replace empty attributes map
+		attributes := []string{}
+		handler, err := sec.GetTCertificateHandlerNext(attributes...)
+		if err != nil {
+			rw.WriteHeader(http.StatusInternalServerError)
+			encoder.Encode(restResult{Error: err.Error()})
+			restLogger.Errorf("Error: %s", err)
+
+			return
+		}
+
+		// Certificate handler can not be nil
+		if handler == nil {
+			rw.WriteHeader(http.StatusInternalServerError)
+			encoder.Encode(restResult{Error: "Error retrieving certificate handler."})
+			restLogger.Errorf("Error: Error retrieving certificate handler.")
+
+			return
+		}
+
+		// Retrieve the required number of TCerts
+		tcertArray := make([]string, count)
+		var i uint32
+		for i = 0; i < count; i++ {
+			// Obtain the DER encoded certificate
+			certDER := handler.GetCertificate()
+
+			// Confirm the retrieved enrollment certificate is not nil
+			if certDER == nil {
+				rw.WriteHeader(http.StatusInternalServerError)
+				encoder.Encode(restResult{Error: "Transaction certificate is nil."})
+				restLogger.Errorf("Error: Transaction certificate is nil.")
+
+				return
+			}
+
+			// Confirm the retrieved enrollment certificate has non-zero length
+			if len(certDER) == 0 {
+				rw.WriteHeader(http.StatusInternalServerError)
+				encoder.Encode(restResult{Error: "Transaction certificate length is 0."})
+				restLogger.Errorf("Error: Transaction certificate length is 0.")
+
+				return
+			}
+
+			// Transforms the DER encoded certificate to a PEM encoded certificate
+			certPEM := primitives.DERCertToPEM(certDER)
+
+			// As the transaction certificate contains \n characters, url encode it before outputting
+			urlEncodedCert := url.QueryEscape(string(certPEM))
+
+			// Add the urlEncodedCert transaction certificate to the certificate array
+			tcertArray[i] = urlEncodedCert
+		}
+
+		rw.WriteHeader(http.StatusOK)
+		encoder.Encode(tcertsResult{OK: tcertArray})
+		restLogger.Debugf("Successfully retrieved transaction certificates for secure context '%s'", enrollmentID)
+	} else {
+		// Security must be enabled to request transaction certificates
+		rw.WriteHeader(http.StatusBadRequest)
+		encoder.Encode(restResult{Error: "Security functionality must be enabled before requesting client certificates."})
+		restLogger.Errorf("Error: Security functionality must be enabled before requesting client certificates.")
+
+		return
+	}
+}
+
+// GetBlockchainInfo returns information about the blockchain ledger such as
+// height, current block hash, and previous block hash.
+func (s *ServerOpenchainREST) GetBlockchainInfo(rw web.ResponseWriter, req *web.Request) {
+	encoder := json.NewEncoder(rw)
+
+	// Ask the underlying server for the ledger summary.
+	info, err := s.server.GetBlockchainInfo(context.Background(), &google_protobuf.Empty{})
+	if err != nil {
+		// Failure: report the error as a JSON payload with HTTP 400.
+		rw.WriteHeader(http.StatusBadRequest)
+		encoder.Encode(restResult{Error: err.Error()})
+		return
+	}
+
+	// Success: emit the blockchain info message as JSON.
+	rw.WriteHeader(http.StatusOK)
+	encoder.Encode(info)
+}
+
+// GetBlockByNumber returns the data contained within a specific block in the
+// blockchain. The genesis block is block zero.
+func (s *ServerOpenchainREST) GetBlockByNumber(rw web.ResponseWriter, req *web.Request) {
+	encoder := json.NewEncoder(rw)
+
+	// The block id in the path must parse as an unsigned 64-bit integer.
+	blockNumber, parseErr := strconv.ParseUint(req.PathParams["id"], 10, 64)
+	if parseErr != nil {
+		rw.WriteHeader(http.StatusBadRequest)
+		encoder.Encode(restResult{Error: "Block id must be an integer (uint64)."})
+		return
+	}
+
+	// Look the requested block up in the ledger.
+	block, err := s.server.GetBlockByNumber(context.Background(), &pb.BlockNumber{Number: blockNumber})
+
+	switch {
+	case err == ErrNotFound || (err == nil && block == nil):
+		// No block at that height: report 404.
+		rw.WriteHeader(http.StatusNotFound)
+		encoder.Encode(restResult{Error: ErrNotFound.Error()})
+	case err != nil:
+		// Any other retrieval failure is a server-side error.
+		rw.WriteHeader(http.StatusInternalServerError)
+		encoder.Encode(restResult{Error: err.Error()})
+	default:
+		// Success: emit the block as JSON.
+		rw.WriteHeader(http.StatusOK)
+		encoder.Encode(block)
+	}
+}
+
+// GetTransactionByUUID returns a transaction matching the specified UUID
+func (s *ServerOpenchainREST) GetTransactionByUUID(rw web.ResponseWriter, req *web.Request) {
+	// The UUID of the requested transaction comes from the URL path.
+	txUUID := req.PathParams["uuid"]
+
+	// Ask the underlying server for the matching transaction.
+	tx, err := s.server.GetTransactionByUUID(context.Background(), txUUID)
+
+	encoder := json.NewEncoder(rw)
+
+	if err == nil {
+		// Success: return the existing transaction as JSON.
+		rw.WriteHeader(http.StatusOK)
+		encoder.Encode(tx)
+		restLogger.Infof("Successfully retrieved transaction: %s", txUUID)
+		return
+	}
+
+	if err == ErrNotFound {
+		// No transaction with that UUID exists: report 404.
+		rw.WriteHeader(http.StatusNotFound)
+		encoder.Encode(restResult{Error: fmt.Sprintf("Transaction %s is not found.", txUUID)})
+		return
+	}
+
+	// Any other retrieval failure is a server-side error.
+	rw.WriteHeader(http.StatusInternalServerError)
+	encoder.Encode(restResult{Error: fmt.Sprintf("Error retrieving transaction %s: %s.", txUUID, err)})
+	restLogger.Errorf("Error retrieving transaction %s: %s", txUUID, err)
+}
+
+// Deploy first builds the chaincode package and subsequently deploys it to the
+// blockchain.
+//
+// The request body must be a JSON-encoded pb.ChaincodeSpec. Validation
+// failures are reported as HTTP 400 with a JSON error payload; when security
+// is enabled the caller must already hold a login token created via the
+// /registrar endpoint.
+//
+// Deprecated: use the /chaincode endpoint instead (routes to ProcessChaincode)
+func (s *ServerOpenchainREST) Deploy(rw web.ResponseWriter, req *web.Request) {
+	restLogger.Info("REST deploying chaincode...")
+
+	// This endpoint has been deprecated. Add a warning header to all responses.
+	rw.Header().Set("Warning", "299 - /devops/deploy endpoint has been deprecated. Use /chaincode endpoint instead.")
+
+	// Decode the incoming JSON payload
+	var spec pb.ChaincodeSpec
+	err := jsonpb.Unmarshal(req.Body, &spec)
+
+	// Check for proper JSON syntax
+	if err != nil {
+		// Unmarshall returns a " character around unrecognized fields in the case
+		// of a schema validation failure. These must be replaced with a ' character.
+		// Otherwise, the returned JSON is invalid.
+		errVal := strings.Replace(err.Error(), "\"", "'", -1)
+
+		// Client must supply payload
+		if err == io.EOF {
+			rw.WriteHeader(http.StatusBadRequest)
+			fmt.Fprintf(rw, "{\"Error\": \"Payload must contain a ChaincodeSpec.\"}")
+			restLogger.Error("{\"Error\": \"Payload must contain a ChaincodeSpec.\"}")
+		} else {
+			rw.WriteHeader(http.StatusBadRequest)
+			fmt.Fprintf(rw, "{\"Error\": \"%s\"}", errVal)
+			restLogger.Errorf("{\"Error\": \"%s\"}", errVal)
+		}
+
+		return
+	}
+
+	// Check that the ChaincodeID is not nil.
+	if spec.ChaincodeID == nil {
+		rw.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(rw, "{\"Error\": \"Payload must contain a ChaincodeID.\"}")
+		restLogger.Error("{\"Error\": \"Payload must contain a ChaincodeID.\"}")
+
+		return
+	}
+
+	// If the peer is running in development mode, confirm that the Chaincode name
+	// is not left blank. If the peer is running in production mode, confirm that
+	// the Chaincode path is not left blank. This is necessary as in development
+	// mode, the chaincode is identified by name not by path during the deploy
+	// process.
+	if viper.GetString("chaincode.mode") == chaincode.DevModeUserRunsChaincode {
+		// Check that the Chaincode name is not blank.
+		if spec.ChaincodeID.Name == "" {
+			rw.WriteHeader(http.StatusBadRequest)
+			fmt.Fprintf(rw, "{\"Error\": \"Chaincode name may not be blank in development mode.\"}")
+			restLogger.Error("{\"Error\": \"Chaincode name may not be blank in development mode.\"}")
+
+			return
+		}
+	} else {
+		// Check that the Chaincode path is not left blank.
+		if spec.ChaincodeID.Path == "" {
+			rw.WriteHeader(http.StatusBadRequest)
+			fmt.Fprintf(rw, "{\"Error\": \"Chaincode path may not be blank.\"}")
+			restLogger.Error("{\"Error\": \"Chaincode path may not be blank.\"}")
+
+			return
+		}
+	}
+
+	// Check that the CtorMsg is not left blank.
+	if (spec.CtorMsg == nil) || (spec.CtorMsg.Function == "") {
+		rw.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(rw, "{\"Error\": \"Payload must contain a CtorMsg with a Chaincode function name.\"}")
+		restLogger.Error("{\"Error\": \"Payload must contain a CtorMsg with a Chaincode function name.\"}")
+
+		return
+	}
+
+	// If security is enabled, add client login token
+	if core.SecurityEnabled() {
+		chaincodeUsr := spec.SecureContext
+		if chaincodeUsr == "" {
+			rw.WriteHeader(http.StatusBadRequest)
+			fmt.Fprintf(rw, "{\"Error\": \"Must supply username for chaincode when security is enabled.\"}")
+			restLogger.Error("{\"Error\": \"Must supply username for chaincode when security is enabled.\"}")
+
+			return
+		}
+
+		// Retrieve the REST data storage path
+		// Returns /var/hyperledger/production/client/
+		localStore := getRESTFilePath()
+
+		// Check if the user is logged in before sending transaction
+		if _, err := os.Stat(localStore + "loginToken_" + chaincodeUsr); err == nil {
+			restLogger.Infof("Local user '%s' is already logged in. Retrieving login token.\n", chaincodeUsr)
+
+			// Read in the login token
+			token, err := ioutil.ReadFile(localStore + "loginToken_" + chaincodeUsr)
+			if err != nil {
+				// A 500 is written first; an unreadable token file is then
+				// treated as fatal and the handler panics.
+				rw.WriteHeader(http.StatusInternalServerError)
+				fmt.Fprintf(rw, "{\"Error\": \"Fatal error -- %s\"}", err)
+				panic(fmt.Errorf("Fatal error when reading client login token: %s\n", err))
+			}
+
+			// Add the login token to the chaincodeSpec
+			spec.SecureContext = string(token)
+
+			// If privacy is enabled, mark chaincode as confidential
+			if viper.GetBool("security.privacy") {
+				spec.ConfidentialityLevel = pb.ConfidentialityLevel_CONFIDENTIAL
+			}
+		} else {
+			// Check if the token is not there and fail
+			if os.IsNotExist(err) {
+				rw.WriteHeader(http.StatusUnauthorized)
+				fmt.Fprintf(rw, "{\"Error\": \"User not logged in. Use the '/registrar' endpoint to obtain a security token.\"}")
+				restLogger.Error("{\"Error\": \"User not logged in. Use the '/registrar' endpoint to obtain a security token.\"}")
+
+				return
+			}
+			// Unexpected error: a 500 is written, then the handler panics.
+			rw.WriteHeader(http.StatusInternalServerError)
+			fmt.Fprintf(rw, "{\"Error\": \"Fatal error -- %s\"}", err)
+			panic(fmt.Errorf("Fatal error when checking for client login token: %s\n", err))
+		}
+	}
+
+	// Deploy the ChaincodeSpec
+	chaincodeDeploymentSpec, err := s.devops.Deploy(context.Background(), &spec)
+	if err != nil {
+		// Replace " characters with '
+		errVal := strings.Replace(err.Error(), "\"", "'", -1)
+
+		rw.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(rw, "{\"Error\": \"%s\"}", errVal)
+		restLogger.Errorf("{\"Error\": \"Deploying Chaincode -- %s\"}", errVal)
+
+		return
+	}
+
+	// Clients will need the chaincode name in order to invoke or query it
+	chainID := chaincodeDeploymentSpec.ChaincodeSpec.ChaincodeID.Name
+
+	rw.WriteHeader(http.StatusOK)
+	fmt.Fprintf(rw, "{\"OK\": \"Successfully deployed chainCode.\",\"message\":\""+chainID+"\"}")
+	restLogger.Infof("Successfully deployed chainCode: %s \n", chainID)
+}
+
+// Invoke executes a specified function within a target Chaincode.
+//
+// The request body must be a JSON-encoded pb.ChaincodeInvocationSpec.
+// Validation failures are reported as HTTP 400 with a JSON error payload;
+// when security is enabled the caller must already hold a login token
+// created via the /registrar endpoint. On success the response contains the
+// transaction UUID — submission is asynchronous, so a 200 means the
+// transaction was accepted, not that it has completed.
+//
+// Deprecated: use the /chaincode endpoint instead (routes to ProcessChaincode)
+func (s *ServerOpenchainREST) Invoke(rw web.ResponseWriter, req *web.Request) {
+	restLogger.Info("REST invoking chaincode...")
+
+	// This endpoint has been deprecated. Add a warning header to all responses.
+	rw.Header().Set("Warning", "299 - /devops/invoke endpoint has been deprecated. Use /chaincode endpoint instead.")
+
+	// Decode the incoming JSON payload
+	var spec pb.ChaincodeInvocationSpec
+	err := jsonpb.Unmarshal(req.Body, &spec)
+
+	// Check for proper JSON syntax
+	if err != nil {
+		// Unmarshall returns a " character around unrecognized fields in the case
+		// of a schema validation failure. These must be replaced with a ' character.
+		// Otherwise, the returned JSON is invalid.
+		errVal := strings.Replace(err.Error(), "\"", "'", -1)
+
+		// Client must supply payload
+		if err == io.EOF {
+			rw.WriteHeader(http.StatusBadRequest)
+			fmt.Fprintf(rw, "{\"Error\": \"Payload must contain a ChaincodeInvocationSpec.\"}")
+			restLogger.Error("{\"Error\": \"Payload must contain a ChaincodeInvocationSpec.\"}")
+		} else {
+			rw.WriteHeader(http.StatusBadRequest)
+			fmt.Fprintf(rw, "{\"Error\": \"%s\"}", errVal)
+			restLogger.Errorf("{\"Error\": \"%s\"}", errVal)
+		}
+
+		return
+	}
+
+	// Check that the ChaincodeSpec is not left blank.
+	if spec.ChaincodeSpec == nil {
+		rw.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(rw, "{\"Error\": \"Payload must contain a ChaincodeSpec.\"}")
+		restLogger.Error("{\"Error\": \"Payload must contain a ChaincodeSpec.\"}")
+
+		return
+	}
+
+	// Check that the ChaincodeID is not left blank.
+	if spec.ChaincodeSpec.ChaincodeID == nil {
+		rw.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(rw, "{\"Error\": \"Payload must contain a ChaincodeID.\"}")
+		restLogger.Error("{\"Error\": \"Payload must contain a ChaincodeID.\"}")
+
+		return
+	}
+
+	// Check that the Chaincode name is not blank.
+	if spec.ChaincodeSpec.ChaincodeID.Name == "" {
+		rw.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(rw, "{\"Error\": \"Chaincode name may not be blank.\"}")
+		restLogger.Error("{\"Error\": \"Chaincode name may not be blank.\"}")
+
+		return
+	}
+
+	// Check that the CtorMsg is not left blank.
+	if (spec.ChaincodeSpec.CtorMsg == nil) || (spec.ChaincodeSpec.CtorMsg.Function == "") {
+		rw.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(rw, "{\"Error\": \"Payload must contain a CtorMsg with a Chaincode function name.\"}")
+		restLogger.Error("{\"Error\": \"Payload must contain a CtorMsg with a Chaincode function name.\"}")
+
+		return
+	}
+
+	// If security is enabled, add client login token
+	if core.SecurityEnabled() {
+		chaincodeUsr := spec.ChaincodeSpec.SecureContext
+		if chaincodeUsr == "" {
+			rw.WriteHeader(http.StatusBadRequest)
+			fmt.Fprintf(rw, "{\"Error\": \"Must supply username for chaincode when security is enabled.\"}")
+			restLogger.Error("{\"Error\": \"Must supply username for chaincode when security is enabled.\"}")
+
+			return
+		}
+
+		// Retrieve the REST data storage path
+		// Returns /var/hyperledger/production/client/
+		localStore := getRESTFilePath()
+
+		// Check if the user is logged in before sending transaction
+		if _, err := os.Stat(localStore + "loginToken_" + chaincodeUsr); err == nil {
+			restLogger.Infof("Local user '%s' is already logged in. Retrieving login token.\n", chaincodeUsr)
+
+			// Read in the login token
+			token, err := ioutil.ReadFile(localStore + "loginToken_" + chaincodeUsr)
+			if err != nil {
+				// A 500 is written first; an unreadable token file is then
+				// treated as fatal and the handler panics.
+				rw.WriteHeader(http.StatusInternalServerError)
+				fmt.Fprintf(rw, "{\"Error\": \"Fatal error -- %s\"}", err)
+				panic(fmt.Errorf("Fatal error when reading client login token: %s\n", err))
+			}
+
+			// Add the login token to the chaincodeSpec
+			spec.ChaincodeSpec.SecureContext = string(token)
+
+			// If privacy is enabled, mark chaincode as confidential
+			if viper.GetBool("security.privacy") {
+				spec.ChaincodeSpec.ConfidentialityLevel = pb.ConfidentialityLevel_CONFIDENTIAL
+			}
+		} else {
+			// Check if the token is not there and fail
+			if os.IsNotExist(err) {
+				rw.WriteHeader(http.StatusUnauthorized)
+				fmt.Fprintf(rw, "{\"Error\": \"User not logged in. Use the '/registrar' endpoint to obtain a security token.\"}")
+				restLogger.Error("{\"Error\": \"User not logged in. Use the '/registrar' endpoint to obtain a security token.\"}")
+
+				return
+			}
+			// Unexpected error: a 500 is written, then the handler panics.
+			rw.WriteHeader(http.StatusInternalServerError)
+			fmt.Fprintf(rw, "{\"Error\": \"Fatal error -- %s\"}", err)
+			panic(fmt.Errorf("Fatal error when checking for client login token: %s\n", err))
+		}
+	}
+
+	// Invoke the chainCode
+	resp, err := s.devops.Invoke(context.Background(), &spec)
+	if err != nil {
+		// Replace " characters with '
+		errVal := strings.Replace(err.Error(), "\"", "'", -1)
+
+		rw.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(rw, "{\"Error\": \"%s\"}", errVal)
+		restLogger.Errorf("{\"Error\": \"Invoking Chaincode -- %s\"}", errVal)
+
+		return
+	}
+
+	// Clients will need the txuuid in order to track it after invocation
+	txuuid := resp.Msg
+
+	rw.WriteHeader(http.StatusOK)
+	// Make a clarification in the invoke response message, that the transaction has been successfully submitted but not completed
+	fmt.Fprintf(rw, "{\"OK\": \"Successfully submitted invoke transaction.\",\"message\": \"%s\"}", string(txuuid))
+	restLogger.Infof("Successfully submitted invoke transaction (%s).\n", string(txuuid))
+}
+
+// Query performs the requested query on the target Chaincode.
+//
+// The request body must be a JSON-encoded pb.ChaincodeInvocationSpec.
+// Validation failures are reported as HTTP 400 with a JSON error payload;
+// when security is enabled the caller must already hold a login token
+// created via the /registrar endpoint. The query result is wrapped in a
+// JSON {"OK": ...} object, either verbatim (if already JSON) or as a string.
+//
+// Deprecated: use the /chaincode endpoint instead (routes to ProcessChaincode)
+func (s *ServerOpenchainREST) Query(rw web.ResponseWriter, req *web.Request) {
+	restLogger.Info("REST querying chaincode...")
+
+	// This endpoint has been deprecated. Add a warning header to all responses.
+	rw.Header().Set("Warning", "299 - /devops/query endpoint has been deprecated. Use /chaincode endpoint instead.")
+
+	// Decode the incoming JSON payload
+	var spec pb.ChaincodeInvocationSpec
+	err := jsonpb.Unmarshal(req.Body, &spec)
+
+	// Check for proper JSON syntax
+	if err != nil {
+		// Unmarshall returns a " character around unrecognized fields in the case
+		// of a schema validation failure. These must be replaced with a ' character.
+		// Otherwise, the returned JSON is invalid.
+		errVal := strings.Replace(err.Error(), "\"", "'", -1)
+
+		// Client must supply payload
+		if err == io.EOF {
+			rw.WriteHeader(http.StatusBadRequest)
+			fmt.Fprintf(rw, "{\"Error\": \"Payload must contain a ChaincodeInvocationSpec.\"}")
+			restLogger.Error("{\"Error\": \"Payload must contain a ChaincodeInvocationSpec.\"}")
+		} else {
+			rw.WriteHeader(http.StatusBadRequest)
+			fmt.Fprintf(rw, "{\"Error\": \"%s\"}", errVal)
+			restLogger.Errorf("{\"Error\": \"%s\"}", errVal)
+		}
+
+		return
+	}
+
+	// Check that the ChaincodeSpec is not left blank.
+	if spec.ChaincodeSpec == nil {
+		rw.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(rw, "{\"Error\": \"Payload must contain a ChaincodeSpec.\"}")
+		restLogger.Error("{\"Error\": \"Payload must contain a ChaincodeSpec.\"}")
+
+		return
+	}
+
+	// Check that the ChaincodeID is not left blank.
+	if spec.ChaincodeSpec.ChaincodeID == nil {
+		rw.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(rw, "{\"Error\": \"Payload must contain a ChaincodeID.\"}")
+		restLogger.Error("{\"Error\": \"Payload must contain a ChaincodeID.\"}")
+
+		return
+	}
+
+	// Check that the Chaincode name is not blank.
+	if spec.ChaincodeSpec.ChaincodeID.Name == "" {
+		rw.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(rw, "{\"Error\": \"Chaincode name may not be blank.\"}")
+		restLogger.Error("{\"Error\": \"Chaincode name may not be blank.\"}")
+
+		return
+	}
+
+	// Check that the CtorMsg is not left blank.
+	if (spec.ChaincodeSpec.CtorMsg == nil) || (spec.ChaincodeSpec.CtorMsg.Function == "") {
+		rw.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(rw, "{\"Error\": \"Payload must contain a CtorMsg with a Chaincode function name.\"}")
+		restLogger.Error("{\"Error\": \"Payload must contain a CtorMsg with a Chaincode function name.\"}")
+
+		return
+	}
+
+	// If security is enabled, add client login token
+	if core.SecurityEnabled() {
+		chaincodeUsr := spec.ChaincodeSpec.SecureContext
+		if chaincodeUsr == "" {
+			rw.WriteHeader(http.StatusBadRequest)
+			fmt.Fprintf(rw, "{\"Error\": \"Must supply username for chaincode when security is enabled.\"}")
+			restLogger.Error("{\"Error\": \"Must supply username for chaincode when security is enabled.\"}")
+
+			return
+		}
+
+		// Retrieve the REST data storage path
+		// Returns /var/hyperledger/production/client/
+		localStore := getRESTFilePath()
+
+		// Check if the user is logged in before sending transaction
+		if _, err := os.Stat(localStore + "loginToken_" + chaincodeUsr); err == nil {
+			restLogger.Infof("Local user '%s' is already logged in. Retrieving login token.\n", chaincodeUsr)
+
+			// Read in the login token
+			token, err := ioutil.ReadFile(localStore + "loginToken_" + chaincodeUsr)
+			if err != nil {
+				// A 500 is written first; an unreadable token file is then
+				// treated as fatal and the handler panics.
+				rw.WriteHeader(http.StatusInternalServerError)
+				fmt.Fprintf(rw, "{\"Error\": \"Fatal error -- %s\"}", err)
+				panic(fmt.Errorf("Fatal error when reading client login token: %s\n", err))
+			}
+
+			// Add the login token to the chaincodeSpec
+			spec.ChaincodeSpec.SecureContext = string(token)
+
+			// If privacy is enabled, mark chaincode as confidential
+			if viper.GetBool("security.privacy") {
+				spec.ChaincodeSpec.ConfidentialityLevel = pb.ConfidentialityLevel_CONFIDENTIAL
+			}
+		} else {
+			// Check if the token is not there and fail
+			if os.IsNotExist(err) {
+				rw.WriteHeader(http.StatusUnauthorized)
+				fmt.Fprintf(rw, "{\"Error\": \"User not logged in. Use the '/registrar' endpoint to obtain a security token.\"}")
+				restLogger.Error("{\"Error\": \"User not logged in. Use the '/registrar' endpoint to obtain a security token.\"}")
+
+				return
+			}
+			// Unexpected error: a 500 is written, then the handler panics.
+			rw.WriteHeader(http.StatusInternalServerError)
+			fmt.Fprintf(rw, "{\"Error\": \"Fatal error -- %s\"}", err)
+			panic(fmt.Errorf("Fatal error when checking for client login token: %s\n", err))
+		}
+	}
+
+	// Query the chainCode
+	resp, err := s.devops.Query(context.Background(), &spec)
+	if err != nil {
+		// Replace " characters with '
+		errVal := strings.Replace(err.Error(), "\"", "'", -1)
+
+		rw.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(rw, "{\"Error\": \"%s\"}", errVal)
+		restLogger.Errorf("{\"Error\": \"Querying Chaincode -- %s\"}", errVal)
+
+		return
+	}
+
+	// Determine if the response received is JSON formatted
+	if isJSON(string(resp.Msg)) {
+		// Response is JSON formatted, return it as is
+		rw.WriteHeader(http.StatusOK)
+		fmt.Fprintf(rw, "{\"OK\": %s}", string(resp.Msg))
+	} else {
+		// Response is not JSON formatted, construct a JSON formatted response
+		jsonResponse, err := json.Marshal(restResult{OK: string(resp.Msg)})
+		if err != nil {
+			rw.WriteHeader(http.StatusInternalServerError)
+			fmt.Fprintf(rw, "{\"Error\": \"%s\"}", err)
+			restLogger.Errorf("{\"Error marshalling query response\": \"%s\"}", err)
+
+			return
+		}
+
+		rw.WriteHeader(http.StatusOK)
+		// Write the marshalled bytes directly. The previous
+		// fmt.Fprintf(rw, string(jsonResponse)) used the response as a format
+		// string, so any '%' in the query result would corrupt the output.
+		rw.Write(jsonResponse)
+	}
+}
+
+// ProcessChaincode implements JSON RPC 2.0 specification for chaincode deploy, invoke, and query.
+func (s *ServerOpenchainREST) ProcessChaincode(rw web.ResponseWriter, req *web.Request) {
+ restLogger.Info("REST processing chaincode request...")
+
+ encoder := json.NewEncoder(rw)
+
+ // Read in the incoming request payload
+ reqBody, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ // Format the error appropriately and produce JSON RPC 2.0 response
+ errObj := formatRPCError(InternalError.Code, InternalError.Message, "Internal JSON-RPC error when reading request body.")
+ rw.WriteHeader(http.StatusInternalServerError)
+ encoder.Encode(formatRPCResponse(errObj, nil))
+ restLogger.Error("Internal JSON-RPC error when reading request body.")
+ return
+ }
+
+ // Incoming request body may not be empty, client must supply request payload
+ if string(reqBody) == "" {
+ // Format the error appropriately and produce JSON RPC 2.0 response
+ errObj := formatRPCError(InvalidRequest.Code, InvalidRequest.Message, "Client must supply a payload for chaincode requests.")
+ rw.WriteHeader(http.StatusBadRequest)
+ encoder.Encode(formatRPCResponse(errObj, nil))
+ restLogger.Error("Client must supply a payload for chaincode requests.")
+ return
+ }
+
+ // Payload must conform to the following structure
+ var requestPayload rpcRequest
+
+ // Decode the request payload as an rpcRequest structure. There will be an
+ // error here if the incoming JSON is invalid (e.g. missing brace or comma).
+ err = json.Unmarshal(reqBody, &requestPayload)
+ if err != nil {
+ // Format the error appropriately and produce JSON RPC 2.0 response
+ errObj := formatRPCError(ParseError.Code, ParseError.Message, fmt.Sprintf("Error unmarshalling chaincode request payload: %s", err))
+ rw.WriteHeader(http.StatusBadRequest)
+ encoder.Encode(formatRPCResponse(errObj, nil))
+ restLogger.Errorf("Error unmarshalling chaincode request payload: %s", err)
+ return
+ }
+
+ //
+ // After parsing the request payload successfully, determine if the incoming
+ // request payload contains an "id" member. If id is not included, the request
+ // is assumed to be a notification. The Server MUST NOT reply to a Notification.
+ // Notifications are not confirmable by definition, since they do not have a
+ // Response object to be returned. As such, the Client would not be aware of
+ // any errors (like e.g. "Invalid params","Internal error").
+ //
+
+ notification := false
+ if requestPayload.ID == nil {
+ notification = true
+ }
+
+ // Insure that JSON RPC version string is present and is exactly "2.0"
+ if requestPayload.Jsonrpc == nil {
+ // If the request is not a notification, produce a response.
+ if !notification {
+ // Format the error appropriately and produce JSON RPC 2.0 response
+ errObj := formatRPCError(InvalidRequest.Code, InvalidRequest.Message, "Missing JSON RPC 2.0 version string.")
+ rw.WriteHeader(http.StatusBadRequest)
+ encoder.Encode(formatRPCResponse(errObj, requestPayload.ID))
+ }
+ restLogger.Error("Missing JSON RPC version string.")
+
+ return
+ } else if *(requestPayload.Jsonrpc) != "2.0" {
+ // If the request is not a notification, produce a response.
+ if !notification {
+ // Format the error appropriately and produce JSON RPC 2.0 response
+ errObj := formatRPCError(InvalidRequest.Code, InvalidRequest.Message, "Invalid JSON RPC 2.0 version string. Must be 2.0.")
+ rw.WriteHeader(http.StatusBadRequest)
+ encoder.Encode(formatRPCResponse(errObj, requestPayload.ID))
+ }
+ restLogger.Error("Invalid JSON RPC version string. Must be 2.0.")
+
+ return
+ }
+
+ // Insure that the JSON method string is present and is either deploy, invoke or query
+ if requestPayload.Method == nil {
+ // If the request is not a notification, produce a response.
+ if !notification {
+ // Format the error appropriately and produce JSON RPC 2.0 response
+ errObj := formatRPCError(InvalidRequest.Code, InvalidRequest.Message, "Missing JSON RPC 2.0 method string.")
+ rw.WriteHeader(http.StatusBadRequest)
+ encoder.Encode(formatRPCResponse(errObj, requestPayload.ID))
+ }
+ restLogger.Error("Missing JSON RPC 2.0 method string.")
+
+ return
+ } else if (*(requestPayload.Method) != "deploy") && (*(requestPayload.Method) != "invoke") && (*(requestPayload.Method) != "query") {
+ // If the request is not a notification, produce a response.
+ if !notification {
+ // Format the error appropriately and produce JSON RPC 2.0 response
+ errObj := formatRPCError(MethodNotFound.Code, MethodNotFound.Message, "Requested method does not exist.")
+ rw.WriteHeader(http.StatusNotFound)
+ encoder.Encode(formatRPCResponse(errObj, requestPayload.ID))
+ }
+ restLogger.Error("Requested method does not exist.")
+
+ return
+ }
+
+ //
+ // Confirm the requested chaincode method and execute accordingly
+ //
+
+ // Variable that will hold the execution result
+ var result rpcResult
+
+ if *(requestPayload.Method) == "deploy" {
+
+ //
+ // Chaincode deployment was requested
+ //
+
+ // Payload params field must contain a ChaincodeSpec message
+ if requestPayload.Params == nil {
+ // If the request is not a notification, produce a response.
+ if !notification {
+ // Format the error appropriately and produce JSON RPC 2.0 response
+ errObj := formatRPCError(InvalidParams.Code, InvalidParams.Message, "Client must supply ChaincodeSpec for chaincode deploy request.")
+ rw.WriteHeader(http.StatusBadRequest)
+ encoder.Encode(formatRPCResponse(errObj, requestPayload.ID))
+ }
+ restLogger.Error("Client must supply ChaincodeSpec for chaincode deploy request.")
+
+ return
+ }
+
+ // Extract the ChaincodeSpec from the params field
+ deploySpec := requestPayload.Params
+
+ // Process the chaincode deployment request and record the result
+ result = s.processChaincodeDeploy(deploySpec)
+ } else {
+
+ //
+ // Chaincode invocation/query was reqested
+ //
+
+ // Because chaincode invocation/query requests require a ChaincodeInvocationSpec
+ // message instead of a ChaincodeSpec message, we must initialize it here
+ // before proceeding.
+ invokequeryPayload := &pb.ChaincodeInvocationSpec{ChaincodeSpec: requestPayload.Params}
+
+ // Payload params field must contain a ChaincodeSpec message
+ if invokequeryPayload.ChaincodeSpec == nil {
+ // If the request is not a notification, produce a response.
+ if !notification {
+ // Format the error appropriately and produce JSON RPC 2.0 response
+ errObj := formatRPCError(InvalidParams.Code, InvalidParams.Message, "Client must supply ChaincodeSpec for chaincode deploy request.")
+ rw.WriteHeader(http.StatusBadRequest)
+ encoder.Encode(formatRPCResponse(errObj, requestPayload.ID))
+ }
+ restLogger.Error("Client must supply ChaincodeSpec for chaincode invoke or query request.")
+
+ return
+ }
+
+ // Process the chaincode invoke/query request and record the result
+ result = s.processChaincodeInvokeOrQuery(*(requestPayload.Method), invokequeryPayload)
+ }
+
+ //
+ // Generate correctly formatted JSON RPC 2.0 response payload
+ //
+
+ response := formatRPCResponse(result, requestPayload.ID)
+ jsonResponse, _ := json.Marshal(response)
+
+ // If the request is not a notification, produce a response.
+ if !notification {
+ rw.WriteHeader(http.StatusOK)
+ rw.Write(jsonResponse)
+ }
+
+ // Make a clarification in the invoke response message, that the transaction has been successfully submitted but not completed
+ if *(requestPayload.Method) == "invoke" {
+ restLogger.Infof("REST successfully submitted invoke transaction: %s", string(jsonResponse))
+ } else {
+ restLogger.Infof("REST successfully %s chaincode: %s", *(requestPayload.Method), string(jsonResponse))
+ }
+
+ return
+}
+
+// processChaincodeDeploy triggers chaincode deploy and returns a result or an error
+//
+// The supplied ChaincodeSpec is validated (ChaincodeID; chaincode name in
+// development mode or chaincode path in network mode; CtorMsg; security
+// credentials when security is enabled) before the deployment is submitted to
+// the devops service. On success the returned rpcResult carries the deployed
+// chaincode name; on failure it carries a JSON RPC 2.0 error object.
+func (s *ServerOpenchainREST) processChaincodeDeploy(spec *pb.ChaincodeSpec) rpcResult {
+	restLogger.Info("REST deploying chaincode...")
+
+	// Check that the ChaincodeID is not nil.
+	if spec.ChaincodeID == nil {
+		// Format the error appropriately for further processing. The variable
+		// is named rpcErr (not `error`) to avoid shadowing the predeclared type.
+		rpcErr := formatRPCError(InvalidParams.Code, InvalidParams.Message, "Payload must contain a ChaincodeID.")
+		restLogger.Error("Payload must contain a ChaincodeID.")
+
+		return rpcErr
+	}
+
+	// If the peer is running in development mode, confirm that the Chaincode name
+	// is not left blank. If the peer is running in production mode, confirm that
+	// the Chaincode path is not left blank. This is necessary as in development
+	// mode, the chaincode is identified by name not by path during the deploy
+	// process.
+	if viper.GetString("chaincode.mode") == chaincode.DevModeUserRunsChaincode {
+		// Development mode -- check that the Chaincode name is not blank.
+		if spec.ChaincodeID.Name == "" {
+			rpcErr := formatRPCError(InvalidParams.Code, InvalidParams.Message, "Chaincode name may not be blank in development mode.")
+			restLogger.Error("Chaincode name may not be blank in development mode.")
+
+			return rpcErr
+		}
+	} else {
+		// Network mode -- check that the Chaincode path is not left blank.
+		if spec.ChaincodeID.Path == "" {
+			rpcErr := formatRPCError(InvalidParams.Code, InvalidParams.Message, "Chaincode path may not be blank.")
+			restLogger.Error("Chaincode path may not be blank.")
+
+			return rpcErr
+		}
+	}
+
+	// Check that the CtorMsg is not left blank.
+	if (spec.CtorMsg == nil) || (spec.CtorMsg.Function == "") {
+		rpcErr := formatRPCError(InvalidParams.Code, InvalidParams.Message, "Payload must contain a CtorMsg with a Chaincode function name.")
+		restLogger.Error("Payload must contain a CtorMsg with a Chaincode function name.")
+
+		return rpcErr
+	}
+
+	//
+	// Check if security is enabled
+	//
+
+	if core.SecurityEnabled() {
+		// User registrationID must be present inside request payload with security enabled
+		chaincodeUsr := spec.SecureContext
+		if chaincodeUsr == "" {
+			rpcErr := formatRPCError(InvalidParams.Code, InvalidParams.Message, "Must supply username for chaincode when security is enabled.")
+			restLogger.Error("Must supply username for chaincode when security is enabled.")
+
+			return rpcErr
+		}
+
+		// Retrieve the REST data storage path
+		// Returns /var/hyperledger/production/client/
+		localStore := getRESTFilePath()
+
+		// Check if the user is logged in before sending transaction
+		if _, err := os.Stat(localStore + "loginToken_" + chaincodeUsr); err == nil {
+			// No error returned, therefore token exists so user is already logged in
+			restLogger.Infof("Local user '%s' is already logged in. Retrieving login token.", chaincodeUsr)
+
+			// Read in the login token
+			token, err := ioutil.ReadFile(localStore + "loginToken_" + chaincodeUsr)
+			if err != nil {
+				rpcErr := formatRPCError(InternalError.Code, InternalError.Message, fmt.Sprintf("Fatal error when reading client login token: %s", err))
+				restLogger.Errorf("Fatal error when reading client login token: %s", err)
+
+				return rpcErr
+			}
+
+			// Add the login token to the chaincodeSpec
+			spec.SecureContext = string(token)
+
+			// If privacy is enabled, mark chaincode as confidential
+			if viper.GetBool("security.privacy") {
+				spec.ConfidentialityLevel = pb.ConfidentialityLevel_CONFIDENTIAL
+			}
+		} else {
+			// Check if the token is not there and fail
+			if os.IsNotExist(err) {
+				rpcErr := formatRPCError(MissingRegistrationError.Code, MissingRegistrationError.Message, MissingRegistrationError.Data)
+				restLogger.Error(MissingRegistrationError.Data)
+
+				return rpcErr
+			}
+			// Unexpected error while checking for the login token
+			rpcErr := formatRPCError(InternalError.Code, InternalError.Message, fmt.Sprintf("Unexpected fatal error when checking for client login token: %s", err))
+			restLogger.Errorf("Unexpected fatal error when checking for client login token: %s", err)
+
+			return rpcErr
+		}
+	}
+
+	// Trigger the chaincode deployment through the devops service
+	chaincodeDeploymentSpec, err := s.devops.Deploy(context.Background(), spec)
+	if err != nil {
+		// Deployment failed
+		rpcErr := formatRPCError(ChaincodeDeployError.Code, ChaincodeDeployError.Message, fmt.Sprintf("Error when deploying chaincode: %s", err))
+		restLogger.Errorf("Error when deploying chaincode: %s", err)
+
+		return rpcErr
+	}
+
+	// Deployment succeeded. Clients will need the chaincode name in order to
+	// invoke or query it, record it.
+	chainID := chaincodeDeploymentSpec.ChaincodeSpec.ChaincodeID.Name
+
+	// Output correctly formatted response
+	result := formatRPCOK(chainID)
+	restLogger.Infof("Successfully deployed chainCode: %s", chainID)
+
+	return result
+}
+
+// processChaincodeInvokeOrQuery triggers chaincode invoke or query and returns a result or an error
+//
+// The method parameter must be either "invoke" or "query" (ProcessChaincode
+// validates this before dispatching here); any other value yields a zero-value
+// rpcResult. The supplied ChaincodeInvocationSpec is validated (ChaincodeID,
+// chaincode name, CtorMsg, and security credentials when security is enabled)
+// before the request is submitted to the devops service.
+func (s *ServerOpenchainREST) processChaincodeInvokeOrQuery(method string, spec *pb.ChaincodeInvocationSpec) rpcResult {
+	restLogger.Infof("REST %s chaincode...", method)
+
+	// Check that the ChaincodeID is not nil.
+	if spec.ChaincodeSpec.ChaincodeID == nil {
+		// Format the error appropriately for further processing. The variable
+		// is named rpcErr (not `error`) to avoid shadowing the predeclared type.
+		rpcErr := formatRPCError(InvalidParams.Code, InvalidParams.Message, "Payload must contain a ChaincodeID.")
+		restLogger.Error("Payload must contain a ChaincodeID.")
+
+		return rpcErr
+	}
+
+	// Check that the Chaincode name is not blank.
+	if spec.ChaincodeSpec.ChaincodeID.Name == "" {
+		rpcErr := formatRPCError(InvalidParams.Code, InvalidParams.Message, "Chaincode name may not be blank.")
+		restLogger.Error("Chaincode name may not be blank.")
+
+		return rpcErr
+	}
+
+	// Check that the CtorMsg is not left blank.
+	if (spec.ChaincodeSpec.CtorMsg == nil) || (spec.ChaincodeSpec.CtorMsg.Function == "") {
+		rpcErr := formatRPCError(InvalidParams.Code, InvalidParams.Message, "Payload must contain a CtorMsg with a Chaincode function name.")
+		restLogger.Error("Payload must contain a CtorMsg with a Chaincode function name.")
+
+		return rpcErr
+	}
+
+	//
+	// Check if security is enabled
+	//
+
+	if core.SecurityEnabled() {
+		// User registrationID must be present inside request payload with security enabled
+		chaincodeUsr := spec.ChaincodeSpec.SecureContext
+		if chaincodeUsr == "" {
+			rpcErr := formatRPCError(InvalidParams.Code, InvalidParams.Message, "Must supply username for chaincode when security is enabled.")
+			restLogger.Error("Must supply username for chaincode when security is enabled.")
+
+			return rpcErr
+		}
+
+		// Retrieve the REST data storage path
+		// Returns /var/hyperledger/production/client/
+		localStore := getRESTFilePath()
+
+		// Check if the user is logged in before sending transaction
+		if _, err := os.Stat(localStore + "loginToken_" + chaincodeUsr); err == nil {
+			// No error returned, therefore token exists so user is already logged in.
+			// (Stray trailing "\n" dropped from this message for consistency with
+			// the identical log line in processChaincodeDeploy.)
+			restLogger.Infof("Local user '%s' is already logged in. Retrieving login token.", chaincodeUsr)
+
+			// Read in the login token
+			token, err := ioutil.ReadFile(localStore + "loginToken_" + chaincodeUsr)
+			if err != nil {
+				rpcErr := formatRPCError(InternalError.Code, InternalError.Message, fmt.Sprintf("Fatal error when reading client login token: %s", err))
+				restLogger.Errorf("Fatal error when reading client login token: %s", err)
+
+				return rpcErr
+			}
+
+			// Add the login token to the chaincodeSpec
+			spec.ChaincodeSpec.SecureContext = string(token)
+
+			// If privacy is enabled, mark chaincode as confidential
+			if viper.GetBool("security.privacy") {
+				spec.ChaincodeSpec.ConfidentialityLevel = pb.ConfidentialityLevel_CONFIDENTIAL
+			}
+		} else {
+			// Check if the token is not there and fail
+			if os.IsNotExist(err) {
+				rpcErr := formatRPCError(MissingRegistrationError.Code, MissingRegistrationError.Message, MissingRegistrationError.Data)
+				restLogger.Error(MissingRegistrationError.Data)
+
+				return rpcErr
+			}
+			// Unexpected error while checking for the login token
+			rpcErr := formatRPCError(InternalError.Code, InternalError.Message, fmt.Sprintf("Unexpected fatal error when checking for client login token: %s", err))
+			restLogger.Errorf("Unexpected fatal error when checking for client login token: %s", err)
+
+			return rpcErr
+		}
+	}
+
+	// Holds the invoke/query execution result
+	var result rpcResult
+
+	// Check the method that is being requested and execute either an invoke or a query
+	if method == "invoke" {
+		// Trigger the chaincode invoke through the devops service
+		resp, err := s.devops.Invoke(context.Background(), spec)
+		if err != nil {
+			// Invocation failed
+			rpcErr := formatRPCError(ChaincodeInvokeError.Code, ChaincodeInvokeError.Message, fmt.Sprintf("Error when invoking chaincode: %s", err))
+			restLogger.Errorf("Error when invoking chaincode: %s", err)
+
+			return rpcErr
+		}
+
+		// Invocation succeeded. Clients will need the txuuid in order to track
+		// the transaction after invocation, record it.
+		txuuid := string(resp.Msg)
+
+		result = formatRPCOK(txuuid)
+		// Make a clarification in the invoke response message, that the transaction has been successfully submitted but not completed
+		restLogger.Infof("Successfully submitted invoke transaction with txuuid (%s)", txuuid)
+	}
+
+	if method == "query" {
+		// Trigger the chaincode query through the devops service
+		resp, err := s.devops.Query(context.Background(), spec)
+		if err != nil {
+			// Query failed
+			rpcErr := formatRPCError(ChaincodeQueryError.Code, ChaincodeQueryError.Message, fmt.Sprintf("Error when querying chaincode: %s", err))
+			restLogger.Errorf("Error when querying chaincode: %s", err)
+
+			return rpcErr
+		}
+
+		// Query succeeded. Clients will need the returned value, record it.
+		val := string(resp.Msg)
+
+		result = formatRPCOK(val)
+		restLogger.Infof("Successfully queried chaincode: %s", val)
+	}
+
+	return result
+}
+
+// GetPeers returns a list of all peer nodes currently connected to the target peer, including itself
+//
+// The handler fetches the network peer list and the target peer's own endpoint;
+// if the target peer is not already present in the list, its endpoint entries
+// are appended before the combined PeersMessage is written as JSON. Failures of
+// either lookup produce an HTTP 400 with a restResult error payload.
+func (s *ServerOpenchainREST) GetPeers(rw web.ResponseWriter, req *web.Request) {
+	peers, err := s.server.GetPeers(context.Background(), &google_protobuf.Empty{})
+	currentPeer, err1 := s.server.GetPeerEndpoint(context.Background(), &google_protobuf.Empty{})
+
+	encoder := json.NewEncoder(rw)
+
+	// Check for error
+	if err != nil {
+		// Failure retrieving the network peer list
+		rw.WriteHeader(http.StatusBadRequest)
+		encoder.Encode(restResult{Error: err.Error()})
+		restLogger.Errorf("Error: Querying network peers -- %s", err)
+	} else if err1 != nil {
+		// Failure retrieving the target peer's own endpoint
+		// (log message typo fixed: "Accesing" -> "Accessing")
+		rw.WriteHeader(http.StatusBadRequest)
+		encoder.Encode(restResult{Error: err1.Error()})
+		restLogger.Errorf("Error: Accessing target peer endpoint data -- %s", err1)
+	} else {
+		// Determine whether the target peer's own endpoint already appears in
+		// the network peer list; if not, append its entries.
+		currentPeerFound := false
+		peersList := peers.Peers
+		for _, peer := range peers.Peers {
+			for _, cPeer := range currentPeer.Peers {
+				if *peer.GetID() == *cPeer.GetID() {
+					currentPeerFound = true
+				}
+			}
+		}
+		if !currentPeerFound {
+			peersList = append(peersList, currentPeer.Peers...)
+		}
+		peersMessage := &pb.PeersMessage{Peers: peersList}
+		// Success
+		rw.WriteHeader(http.StatusOK)
+		encoder.Encode(peersMessage)
+	}
+}
+
+// NotFound returns a custom landing page when a given hyperledger end point
+// had not been defined.
+func (s *ServerOpenchainREST) NotFound(rw web.ResponseWriter, r *web.Request) {
+	// Emit a 404 status together with a JSON-encoded error payload.
+	encoder := json.NewEncoder(rw)
+	rw.WriteHeader(http.StatusNotFound)
+	encoder.Encode(restResult{Error: "Openchain endpoint not found."})
+}
+
+// buildOpenchainRESTRouter constructs the REST router for the Openchain
+// service: it attaches the server/response middleware, registers every
+// endpoint handler, and installs the custom not-found handler.
+func buildOpenchainRESTRouter() *web.Router {
+	router := web.New(ServerOpenchainREST{})
+
+	// Add middleware
+	router.Middleware((*ServerOpenchainREST).SetOpenchainServer)
+	router.Middleware((*ServerOpenchainREST).SetResponseType)
+
+	// Add routes
+	// Registrar (user enrollment/certificate) endpoints
+	router.Post("/registrar", (*ServerOpenchainREST).Register)
+	router.Get("/registrar/:id", (*ServerOpenchainREST).GetEnrollmentID)
+	router.Delete("/registrar/:id", (*ServerOpenchainREST).DeleteEnrollmentID)
+	router.Get("/registrar/:id/ecert", (*ServerOpenchainREST).GetEnrollmentCert)
+	router.Get("/registrar/:id/tcert", (*ServerOpenchainREST).GetTransactionCert)
+
+	// Blockchain inspection endpoints
+	router.Get("/chain", (*ServerOpenchainREST).GetBlockchainInfo)
+	router.Get("/chain/blocks/:id", (*ServerOpenchainREST).GetBlockByNumber)
+
+	// The /devops endpoint is now considered deprecated and superseded by the /chaincode endpoint
+	router.Post("/devops/deploy", (*ServerOpenchainREST).Deploy)
+	router.Post("/devops/invoke", (*ServerOpenchainREST).Invoke)
+	router.Post("/devops/query", (*ServerOpenchainREST).Query)
+
+	// The /chaincode endpoint which supersedes the /devops endpoints from above
+	router.Post("/chaincode", (*ServerOpenchainREST).ProcessChaincode)
+
+	// Transaction lookup by UUID
+	router.Get("/transactions/:uuid", (*ServerOpenchainREST).GetTransactionByUUID)
+
+	// Network topology endpoint
+	router.Get("/network/peers", (*ServerOpenchainREST).GetPeers)
+
+	// Add not found page
+	router.NotFound((*ServerOpenchainREST).NotFound)
+
+	return router
+}
+
+// StartOpenchainRESTServer initializes the REST service and adds the required
+// middleware and routes.
+func StartOpenchainRESTServer(server *ServerOpenchain, devops *core.Devops) {
+	tlsEnabled := comm.TLSEnabled()
+
+	// Announce the service configuration before binding the listener.
+	tlsStatus := "disabled"
+	if tlsEnabled {
+		tlsStatus = "enabled"
+	}
+	restLogger.Infof("Initializing the REST service on %s, TLS is %s.", viper.GetString("rest.address"), tlsStatus)
+
+	// Record the pointer to the underlying ServerOpenchain and Devops objects.
+	serverOpenchain = server
+	serverDevops = devops
+
+	router := buildOpenchainRESTRouter()
+
+	// Start the server, with or without TLS depending on the peer configuration.
+	if tlsEnabled {
+		if err := http.ListenAndServeTLS(viper.GetString("rest.address"), viper.GetString("peer.tls.cert.file"), viper.GetString("peer.tls.key.file"), router); err != nil {
+			restLogger.Errorf("ListenAndServeTLS: %s", err)
+		}
+	} else {
+		if err := http.ListenAndServe(viper.GetString("rest.address"), router); err != nil {
+			restLogger.Errorf("ListenAndServe: %s", err)
+		}
+	}
+}
diff --git a/core/rest/rest_api.json b/core/rest/rest_api.json
new file mode 100644
index 00000000000..29e9525e43a
--- /dev/null
+++ b/core/rest/rest_api.json
@@ -0,0 +1,839 @@
+{
+ "swagger": "2.0",
+ "info": {
+ "title": "Hyperledger Fabric API",
+ "description": "Interact with the enterprise blockchain through Hyperledger Fabric API",
+ "version": "1.0.0"
+ },
+ "host": "127.0.0.1:5000",
+ "schemes": [
+ "http"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "paths": {
+ "/chain": {
+ "get": {
+ "summary": "Blockchain information",
+ "description": "The Chain endpoint returns information about the current state of the blockchain such as the height, the current block hash, and the previous block hash.",
+ "tags": [
+ "Blockchain"
+ ],
+ "operationId": "getChain",
+ "responses": {
+ "200": {
+ "description": "Blockchain information",
+ "schema": {
+ "$ref": "#/definitions/BlockchainInfo"
+ }
+ },
+ "default": {
+ "description": "Unexpected error",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ },
+ "/chain/blocks/{Block}": {
+ "get": {
+ "summary": "Individual block information",
+ "description": "The {Block} endpoint returns information about a specific block within the Blockchain. Note that the genesis block is block zero.",
+ "tags": [
+ "Block"
+ ],
+ "operationId": "getBlock",
+ "parameters": [{
+ "name": "Block",
+ "in": "path",
+ "description": "Block number to retrieve",
+ "type": "integer",
+ "format": "uint64",
+ "required": true
+ }],
+ "responses": {
+ "200": {
+ "description": "Individual Block contents",
+ "schema": {
+ "$ref": "#/definitions/Block"
+ }
+ },
+ "default": {
+ "description": "Unexpected error",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ },
+ "/transactions/{UUID}": {
+ "get": {
+ "summary": "Individual transaction contents",
+ "description": "The /transactions/{UUID} endpoint returns the transaction matching the specified UUID.",
+ "tags": [
+ "Transactions"
+ ],
+ "operationId": "getTransaction",
+ "parameters": [{
+ "name": "UUID",
+ "in": "path",
+ "description": "Transaction to retrieve from the blockchain.",
+ "type": "string",
+ "required": true
+ }],
+ "responses": {
+ "200": {
+ "description": "Individual Transaction contents",
+ "schema": {
+ "$ref": "#/definitions/Transaction"
+ }
+ },
+ "default": {
+ "description": "Unexpected error",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ },
+ "/devops/deploy": {
+ "post": {
+ "summary": "[DEPRECATED] Service endpoint for deploying Chaincode [DEPRECATED]",
+ "description": "The /devops/deploy endpoint receives Chaincode deployment requests. The Chaincode and the required entities are first packaged into a container and subsequently deployed to the blockchain. If the Chaincode build and deployment are successful, a confirmation message is returned. Otherwise, an error is displayed alongside with a reason for the failure. This service endpoint is being deprecated, please use the /chaincode endpoint instead.",
+ "tags": [
+ "Chaincode"
+ ],
+ "operationId": "chaincodeDeploy",
+ "parameters": [{
+ "name": "ChaincodeSpec",
+ "in": "body",
+ "description": "Chaincode specification message",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/ChaincodeSpec"
+ }
+ }],
+ "responses": {
+ "200": {
+ "description": "Successfully deployed chainCode",
+ "schema": {
+ "$ref": "#/definitions/OK"
+ }
+ },
+ "default": {
+ "description": "Unexpected error",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ },
+ "/devops/invoke": {
+ "post": {
+ "summary": "[DEPRECATED] Service endpoint for invoking Chaincode functions [DEPRECATED]",
+ "description": "The /devops/invoke endpoint receives requests for invoking functions in deployed Chaincodes. If the Chaincode function is invoked successfully, a transaction id is returned. Otherwise, an error is displayed alongside with a reason for the failure. This service endpoint is being deprecated, please use the /chaincode endpoint instead.",
+ "tags": [
+ "Chaincode"
+ ],
+ "operationId": "chaincodeInvoke",
+ "parameters": [{
+ "name": "ChaincodeInvocationSpec",
+ "in": "body",
+ "description": "Chaincode invocation message",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/ChaincodeInvocationSpec"
+ }
+ }],
+ "responses": {
+ "200": {
+ "description": "Successfully invoked transaction",
+ "schema": {
+ "$ref": "#/definitions/OK"
+ }
+ },
+ "default": {
+ "description": "Unexpected error",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ },
+ "/devops/query": {
+ "post": {
+ "summary": "[DEPRECATED] Service endpoint for querying Chaincode state [DEPRECATED]",
+ "description": "The /devops/query endpoint receives requests to query Chaincode state. The request triggers a query method on the target Chaincode, both identified in the required payload. If the query method is successful, the response defined within the method is returned. Otherwise, an error is displayed alongside with a reason for the failure. This service endpoint is being deprecated, please use the /chaincode endpoint instead.",
+ "tags": [
+ "Chaincode"
+ ],
+ "operationId": "chaincodeQuery",
+ "parameters": [{
+ "name": "ChaincodeInvocationSpec",
+ "in": "body",
+ "description": "Chaincode invocation message",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/ChaincodeInvocationSpec"
+ }
+ }],
+ "responses": {
+ "200": {
+ "description": "Successfully queried chaincode",
+ "schema": {
+ "$ref": "#/definitions/OK"
+ }
+ },
+ "default": {
+ "description": "Unexpected error",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ },
+ "/chaincode": {
+ "post": {
+ "summary": "Service endpoint for Chaincode operations",
+ "description": "The /chaincode endpoint receives requests to deploy, invoke, and query a target Chaincode. This service endpoint implements the JSON RPC 2.0 specification with the payload identifying the desired Chaincode operation within the 'method' field.",
+ "tags": [
+ "Chaincode"
+ ],
+ "operationId": "chaincodeOp",
+ "parameters": [{
+ "name": "ChaincodeOpPayload",
+ "in": "body",
+ "description": "Chaincode JSON RPC 2.0 payload",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/ChaincodeOpPayload"
+ }
+ }],
+ "responses": {
+ "200": {
+ "description": "Chaincode operation successful",
+ "schema": {
+ "$ref": "#/definitions/ChaincodeOpSuccess"
+ }
+ },
+ "default": {
+ "description": "Chaincode operation failed",
+ "schema": {
+ "$ref": "#/definitions/ChaincodeOpFailure"
+ }
+ }
+ }
+ }
+ },
+ "/registrar": {
+ "post": {
+ "summary": "Register a user with the certificate authority",
+ "description": "The /registrar endpoint receives requests to register a user with the certificate authority. The request must supply the registration id and password within the payload. If the registration is successful, the required transaction certificates are received and stored locally. Otherwise, an error is displayed alongside with a reason for the failure.",
+ "tags": [
+ "Registrar"
+ ],
+ "operationId": "registerUser",
+ "parameters": [{
+ "name": "Secret",
+ "in": "body",
+ "description": "User enrollment credentials",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/Secret"
+ }
+ }],
+ "responses": {
+ "200": {
+ "description": "Successfully registered user with the certificate authority",
+ "schema": {
+ "$ref": "#/definitions/OK"
+ }
+ },
+ "default": {
+ "description": "Unexpected error",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ },
+ "/registrar/{enrollmentID}": {
+ "get": {
+ "summary": "Confirm the user has registered with the certificate authority",
+ "description": "The /registrar/{enrollmentID} endpoint confirms whether the specified user has registered with the certificate authority. If the user has registered, a confirmation message will be returned. Otherwise, an authorization failure will result.",
+ "tags": [
+ "Registrar"
+ ],
+ "operationId": "getUserRegistration",
+ "parameters": [{
+ "name": "enrollmentID",
+ "in": "path",
+ "description": "Username for which registration is to be confirmed",
+ "type": "string",
+ "required": true
+ }],
+ "responses": {
+ "200": {
+ "description": "Confirm registration for target user",
+ "schema": {
+ "$ref": "#/definitions/OK"
+ }
+ },
+ "default": {
+ "description": "Unexpected error",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ },
+ "delete": {
+ "summary": "Delete user login tokens from local storage",
+ "description": "The /registrar/{enrollmentID} endpoint deletes any existing client login tokens from local storage. After the completion of this request, the target user will no longer be able to execute transactions.",
+ "tags": [
+ "Registrar"
+ ],
+ "operationId": "deleteUserRegistration",
+ "parameters": [{
+ "name": "enrollmentID",
+ "in": "path",
+ "description": "Username for which login tokens are to be deleted",
+ "type": "string",
+ "required": true
+ }],
+ "responses": {
+ "200": {
+ "description": "Confirm deletion of user login tokens",
+ "schema": {
+ "$ref": "#/definitions/OK"
+ }
+ },
+ "default": {
+ "description": "Unexpected error",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ },
+ "/registrar/{enrollmentID}/ecert": {
+ "get": {
+ "summary": "Retrieve user enrollment certificate",
+ "description": "The /registrar/{enrollmentID}/ecert endpoint retrieves the enrollment certificate for a given user that has registered with the certificate authority. If the user has registered, a confirmation message will be returned containing the URL-encoded enrollment certificate. Otherwise, an error will result.",
+ "tags": [
+ "Registrar"
+ ],
+ "operationId": "getUserEnrollmentCertificate",
+ "parameters": [{
+ "name": "enrollmentID",
+ "in": "path",
+ "description": "EnrollmentID for which the certificate is requested",
+ "type": "string",
+ "required": true
+ }],
+ "responses": {
+ "200": {
+ "description": "Confirm registration for target user and return the URL-encoded enrollment certificate",
+ "schema": {
+ "$ref": "#/definitions/OK"
+ }
+ },
+ "default": {
+ "description": "Unexpected error",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ },
+ "/registrar/{enrollmentID}/tcert": {
+ "get": {
+ "summary": "Retrieve user transaction certificates",
+ "description": "The /registrar/{enrollmentID}/tcert endpoint retrieves the transaction certificates for a given user that has registered with the certificate authority. If the user has registered, a confirmation message will be returned containing an array of URL-encoded transaction certificates. Otherwise, an error will result. The desired number of transaction certificates is specified with the optional 'count' query parameter. The default number of returned transaction certificates is 1 and 500 is the maximum number of certificates that can be retrieved with a single request.",
+ "tags": [
+ "Registrar"
+ ],
+ "operationId": "getUserTransactionCertificate",
+ "parameters": [{
+ "name": "enrollmentID",
+ "in": "path",
+ "description": "EnrollmentID for which the certificate is requested",
+ "type": "string",
+ "required": true
+ },
+ {
+ "name": "count",
+ "in": "query",
+ "description": "The desired number of transaction certificates. The default number of returned transaction certificates is 1 and 500 is the maximum number of certificates that can be retrieved with a single request",
+ "type": "string"
+ }],
+ "responses": {
+ "200": {
+ "description": "Confirm registration for target user and return the desired number of URL-encoded transaction certificates",
+ "schema": {
+ "$ref": "#/definitions/OK"
+ }
+ },
+ "default": {
+ "description": "Unexpected error",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ },
+ "/network/peers": {
+ "get": {
+ "summary": "List of network peers",
+ "description": "The /network/peers endpoint returns a list of all existing network connections for the target peer node. The list includes both validating and non-validating peers.",
+ "tags": [
+ "Network"
+ ],
+ "operationId": "getPeers",
+ "responses": {
+ "200": {
+ "description": "List of network peers",
+ "schema": {
+ "$ref": "#/definitions/PeersMessage"
+ }
+ },
+ "default": {
+ "description": "Unexpected error",
+ "schema": {
+ "$ref": "#/definitions/Error"
+ }
+ }
+ }
+ }
+ }
+ },
+ "definitions": {
+ "BlockchainInfo": {
+ "type": "object",
+ "properties": {
+ "height": {
+ "type": "integer",
+ "format": "uint64",
+ "description": "Current height of the blockchain."
+ },
+ "currentBlockHash": {
+ "type": "string",
+ "format": "bytes",
+ "description": "Hash of the last block in the blockchain."
+ },
+ "previousBlockHash": {
+ "type": "string",
+ "format": "bytes",
+ "description": "Hash of the previous block in the blockchain."
+ }
+ }
+ },
+ "Block": {
+ "type": "object",
+ "properties": {
+ "proposerID": {
+ "type": "string",
+ "description": "Creator/originator of the block."
+ },
+ "timestamp": {
+ "$ref": "#/definitions/Timestamp",
+ "description": "Time of block creation."
+ },
+ "transactions": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/Transaction"
+ }
+ },
+ "stateHash": {
+ "type": "string",
+ "format": "bytes",
+ "description": "Global state hash after executing all transactions in the block."
+ },
+ "previousBlockHash": {
+ "type": "string",
+ "format": "bytes",
+ "description": "Hash of the previous block in the blockchain."
+ },
+ "consensusMetadata": {
+ "type": "string",
+ "format": "bytes",
+ "description": "Metadata required for consensus."
+ },
+ "nonHashData": {
+ "type": "string",
+ "format": "bytes",
+ "description": "Data stored in the block, but excluded from the computation of block hash."
+ }
+ }
+ },
+ "Transaction": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "default": "UNDEFINED",
+ "example": "UNDEFINED",
+ "enum":[
+ "UNDEFINED",
+ "CHAINCODE_DEPLOY",
+ "CHAINCODE_INVOKE",
+ "CHAINCODE_QUERY",
+ "CHAINCODE_TERMINATE"
+ ],
+ "description": "Transaction type."
+ },
+ "chaincodeID": {
+ "type": "string",
+ "format": "bytes",
+ "description": "Chaincode identifier as bytes."
+ },
+ "payload": {
+ "type": "string",
+ "format": "bytes",
+ "description": "Payload supplied for Chaincode function execution."
+ },
+ "uuid": {
+ "type": "string",
+ "description": "Unique transaction identifier."
+ },
+ "timestamp": {
+ "$ref": "#/definitions/Timestamp",
+ "description": "Time at which the chaincode becomes executable."
+ },
+ "confidentialityLevel": {
+ "$ref": "#/definitions/ConfidentialityLevel",
+ "description": "Confidentiality level of the Chaincode."
+ },
+ "nonce": {
+ "type": "string",
+ "format": "bytes",
+ "description": "Nonce value generated for this transaction."
+ },
+ "cert": {
+ "type": "string",
+ "format": "bytes",
+ "description": "Certificate of client sending the transaction."
+ },
+ "signature": {
+ "type": "string",
+ "format": "bytes",
+ "description": "Signature of client sending the transaction."
+ }
+ }
+ },
+ "ChaincodeID": {
+ "type": "object",
+ "properties": {
+ "path": {
+ "type": "string",
+ "description": "Chaincode location in the file system. This value is required by the deploy transaction."
+ },
+ "name": {
+ "type": "string",
+ "description": "Chaincode name identifier. This value is required by the invoke and query transactions."
+ }
+ }
+ },
+ "ChaincodeSpec": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "default": "GOLANG",
+ "example": "GOLANG",
+ "enum":[
+ "UNDEFINED",
+ "GOLANG",
+ "NODE",
+ "JAVA"
+ ],
+ "description": "Chaincode specification language."
+ },
+ "chaincodeID": {
+ "$ref": "#/definitions/ChaincodeID",
+ "description": "Unique Chaincode identifier."
+ },
+ "ctorMsg": {
+ "$ref": "#/definitions/ChaincodeInput",
+ "description": "Specific function to execute within the Chaincode."
+ },
+ "secureContext": {
+ "type": "string",
+ "description": "Username when security is enabled."
+ },
+ "confidentialityLevel": {
+ "$ref": "#/definitions/ConfidentialityLevel",
+ "description": "Confidentiality level of the Chaincode."
+ }
+ }
+ },
+ "ChaincodeInvocationSpec": {
+ "type": "object",
+ "properties": {
+ "chaincodeSpec": {
+ "$ref": "#/definitions/ChaincodeSpec",
+ "description": "Chaincode specification message."
+ }
+ }
+ },
+ "ChaincodeOpPayload": {
+ "type": "object",
+ "properties": {
+ "jsonrpc": {
+ "type": "string",
+ "default": "2.0",
+ "description": "A string specifying the version of the JSON-RPC protocol. Must be exactly '2.0'."
+ },
+ "method": {
+ "type": "string",
+ "description": "A string containing the name of the method to be invoked. Must be 'deploy', 'invoke', or 'query'."
+ },
+ "params": {
+ "$ref": "#/definitions/ChaincodeSpec",
+ "description": "A required Chaincode specification message identifying the target chaincode."
+ },
+ "id": {
+ "type": "integer",
+ "format": "int64",
+ "description": "An integer number used to correlate the request and response objects. If it is not included, the request is assumed to be a notification and the server will not generate a response."
+ }
+ },
+ "required": [
+ "jsonrpc",
+ "method",
+ "params",
+ "id"
+ ]
+ },
+ "ConfidentialityLevel":{
+ "type": "string",
+ "default": "PUBLIC",
+ "example": "PUBLIC",
+ "enum":[
+ "PUBLIC",
+ "CONFIDENTIAL"
+ ],
+ "description": "Confidentiality level of the Chaincode."
+ },
+ "ChaincodeInput": {
+ "type": "object",
+ "properties": {
+ "function": {
+ "type": "string",
+ "description": "Function to execute within the Chaincode."
+ },
+ "args": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "Arguments supplied to the Chaincode function."
+ }
+ }
+ },
+ "Secret": {
+ "type": "object",
+ "properties": {
+ "enrollId": {
+ "type": "string",
+ "description": "User enrollment id registered with the certificate authority."
+ },
+ "enrollSecret": {
+ "type": "string",
+ "description": "User enrollment password registered with the certificate authority."
+ }
+ }
+ },
+ "Timestamp": {
+ "type": "object",
+ "properties": {
+ "seconds": {
+ "type": "integer",
+ "format": "int64",
+ "description": "Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive."
+ },
+ "nanos": {
+ "type": "integer",
+ "format": "int32",
+ "description": "Non-negative fractions of a second at nanosecond resolution. Negative second values with fractions must still have non-negative nanos values that count forward in time. Must be from 0 to 999,999,999 inclusive."
+ }
+ }
+ },
+ "PeersMessage": {
+ "type": "object",
+ "properties": {
+ "peers": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/PeerEndpoint"
+ }
+ }
+ }
+ },
+ "PeerEndpoint": {
+ "type": "object",
+ "properties": {
+ "ID": {
+ "$ref": "#/definitions/PeerID",
+ "description": "Unique peer identifier."
+ },
+ "address": {
+ "type": "string",
+ "description": "ipaddress:port combination identifying a network peer."
+ },
+ "type": {
+ "type": "string",
+ "default": "UNDEFINED",
+ "example": "UNDEFINED",
+ "enum":[
+ "UNDEFINED",
+ "VALIDATOR",
+ "NON_VALIDATOR"
+ ],
+ "description": "Network peer type."
+ },
+ "pkiID": {
+ "type": "string",
+ "format": "bytes",
+ "description": "PKI identifier for the network peer."
+ }
+ }
+ },
+ "PeerID": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "Name which uniquely identifies a network peer."
+ }
+ }
+ },
+ "Error": {
+ "type": "object",
+ "properties": {
+ "Error": {
+ "type": "string",
+ "description": "A descriptive message explaining the cause of error."
+ }
+ }
+ },
+ "OK": {
+ "type": "object",
+ "properties": {
+ "OK": {
+ "type": "string",
+ "description": "A descriptive message confirming a successful request."
+ },
+ "message": {
+ "type": "string",
+ "description": "An optional parameter containing additional information about the request."
+ }
+ }
+ },
+ "ChaincodeOpSuccess": {
+ "type": "object",
+ "properties": {
+ "jsonrpc": {
+ "type": "string",
+ "default": "2.0",
+ "description": "A string specifying the version of the JSON-RPC protocol. Must be exactly '2.0'."
+ },
+ "result": {
+ "$ref": "#/definitions/rpcResponse",
+ "description": "The value of this element is determined by the method invoked on the server."
+ },
+ "id": {
+ "type": "integer",
+ "format": "int64",
+ "default": 123,
+ "description": "This number will be the same as the value of the id member in the request object."
+ }
+ },
+ "required": [
+ "jsonrpc",
+ "result",
+ "id"
+ ]
+ },
+ "ChaincodeOpFailure": {
+ "type": "object",
+ "properties": {
+ "jsonrpc": {
+ "type": "string",
+ "default": "2.0",
+ "description": "A string specifying the version of the JSON-RPC protocol. Must be exactly '2.0'."
+ },
+ "error": {
+ "$ref": "#/definitions/rpcError",
+ "description": "A structured value specifying the code and description of the error that occurred."
+ },
+ "id": {
+ "type": "integer",
+ "format": "int64",
+ "default": 123,
+ "description": "This number will be the same as the value of the id member in the request object. If there was an error detecting the id in the request object (e.g. Parse error/Invalid Request), it will be null."
+ }
+ },
+ "required": [
+ "jsonrpc",
+ "error",
+ "id"
+ ]
+ },
+ "rpcResponse": {
+ "type": "object",
+ "properties": {
+ "Status": {
+ "type": "string",
+ "default": "OK",
+ "description": "A string confirming successful request execution."
+ },
+ "Message": {
+ "type": "string",
+ "default": "500",
+ "description": "Additional information about the response or values returned."
+ }
+ },
+ "required": [
+ "Status"
+ ]
+ },
+ "rpcError": {
+ "type": "object",
+ "properties": {
+ "code": {
+ "type": "integer",
+ "format": "int64",
+ "default": -32700,
+ "description": "A number that indicates the error type that occurred."
+ },
+ "message": {
+ "type": "string",
+ "default": "Parse error",
+ "description": "A string providing a short description of the error."
+ },
+ "data": {
+ "type": "string",
+ "default": "Error unmarshalling chaincode request payload: unexpected end of JSON input",
+ "description": "A primitive or structured value that contains additional information about the error (e.g. detailed error information, nested errors etc.)."
+ }
+ },
+ "required": [
+ "code",
+ "message"
+ ]
+ }
+ }
+}
diff --git a/core/rest/rest_api_test.go b/core/rest/rest_api_test.go
new file mode 100644
index 00000000000..0eec300a9c3
--- /dev/null
+++ b/core/rest/rest_api_test.go
@@ -0,0 +1,777 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/hyperledger/fabric/core/ledger"
+ "github.com/hyperledger/fabric/protos"
+)
+
+func performHTTPGet(t *testing.T, url string) []byte {
+ response, err := http.Get(url)
+ if err != nil {
+ t.Fatalf("Error attempt to GET %s: %v", url, err)
+ }
+ body, err := ioutil.ReadAll(response.Body)
+ response.Body.Close()
+ if err != nil {
+ t.Fatalf("Error reading HTTP response body: %v", err)
+ }
+ return body
+}
+
+func performHTTPPost(t *testing.T, url string, requestBody []byte) (*http.Response, []byte) {
+ response, err := http.Post(url, "application/json", bytes.NewReader(requestBody))
+ if err != nil {
+ t.Fatalf("Error attempt to POST %s: %v", url, err)
+ }
+ body, err := ioutil.ReadAll(response.Body)
+ response.Body.Close()
+ if err != nil {
+ t.Fatalf("Error reading HTTP response body: %v", err)
+ }
+ return response, body
+}
+
+func performHTTPDelete(t *testing.T, url string) []byte {
+ req, err := http.NewRequest(http.MethodDelete, url, nil)
+ if err != nil {
+ t.Fatalf("Error building a DELETE request")
+ }
+ response, err := http.DefaultClient.Do(req)
+ if err != nil {
+ t.Fatalf("Error attempt to DELETE %s: %v", url, err)
+ }
+ body, err := ioutil.ReadAll(response.Body)
+ response.Body.Close()
+ if err != nil {
+ t.Fatalf("Error reading HTTP response body: %v", err)
+ }
+ return body
+}
+
+func parseRESTResult(t *testing.T, body []byte) restResult {
+ var res restResult
+ err := json.Unmarshal(body, &res)
+ if err != nil {
+ t.Fatalf("Invalid JSON response: %v", err)
+ }
+ return res
+}
+
+func parseRPCResponse(t *testing.T, body []byte) rpcResponse {
+ var res rpcResponse
+ err := json.Unmarshal(body, &res)
+ if err != nil {
+ t.Fatalf("Invalid JSON RPC response: %v", err)
+ }
+ return res
+}
+
+type mockDevops struct {
+}
+
+func (d *mockDevops) Login(c context.Context, s *protos.Secret) (*protos.Response, error) {
+ if s.EnrollSecret == "wrong_password" {
+ return &protos.Response{Status: protos.Response_FAILURE, Msg: []byte("Wrong mock password")}, nil
+ }
+ return &protos.Response{Status: protos.Response_SUCCESS}, nil
+}
+
+func (d *mockDevops) Build(c context.Context, cs *protos.ChaincodeSpec) (*protos.ChaincodeDeploymentSpec, error) {
+ return nil, nil
+}
+
+func (d *mockDevops) Deploy(c context.Context, spec *protos.ChaincodeSpec) (*protos.ChaincodeDeploymentSpec, error) {
+ if spec.ChaincodeID.Path == "non-existing" {
+ return nil, fmt.Errorf("Deploy failure on non-existing path")
+ }
+ spec.ChaincodeID.Name = "new_name_for_deployed_chaincode"
+ return &protos.ChaincodeDeploymentSpec{ChaincodeSpec: spec, CodePackage: []byte{}}, nil
+}
+
+func (d *mockDevops) Invoke(c context.Context, cis *protos.ChaincodeInvocationSpec) (*protos.Response, error) {
+ switch cis.ChaincodeSpec.CtorMsg.Function {
+ case "fail":
+ return nil, fmt.Errorf("Invoke failure")
+ case "change_owner":
+ return &protos.Response{Status: protos.Response_SUCCESS, Msg: []byte("change_owner_invoke_result")}, nil
+ }
+ return nil, fmt.Errorf("Unknown function invoked")
+}
+
+func (d *mockDevops) Query(c context.Context, cis *protos.ChaincodeInvocationSpec) (*protos.Response, error) {
+ switch cis.ChaincodeSpec.CtorMsg.Function {
+ case "fail":
+ return nil, fmt.Errorf("Query failure with special-\" chars")
+ case "get_owner":
+ return &protos.Response{Status: protos.Response_SUCCESS, Msg: []byte("get_owner_query_result")}, nil
+ }
+ return nil, fmt.Errorf("Unknown query function")
+}
+
+func (d *mockDevops) EXP_GetApplicationTCert(ctx context.Context, secret *protos.Secret) (*protos.Response, error) {
+ return nil, nil
+}
+
+func (d *mockDevops) EXP_PrepareForTx(ctx context.Context, secret *protos.Secret) (*protos.Response, error) {
+ return nil, nil
+}
+
+func (d *mockDevops) EXP_ProduceSigma(ctx context.Context, sigmaInput *protos.SigmaInput) (*protos.Response, error) {
+ return nil, nil
+}
+
+func (d *mockDevops) EXP_ExecuteWithBinding(ctx context.Context, executeWithBinding *protos.ExecuteWithBinding) (*protos.Response, error) {
+ return nil, nil
+}
+
+func initGlobalServerOpenchain(t *testing.T) {
+ var err error
+ serverOpenchain, err = NewOpenchainServerWithPeerInfo(new(peerInfo))
+ if err != nil {
+ t.Fatalf("Error creating OpenchainServer: %s", err)
+ }
+ serverDevops = new(mockDevops)
+}
+
+func TestServerOpenchainREST_API_GetBlockchainInfo(t *testing.T) {
+ // Construct a ledger with 0 blocks.
+ ledger := ledger.InitTestLedger(t)
+
+ initGlobalServerOpenchain(t)
+
+ // Start the HTTP REST test server
+ httpServer := httptest.NewServer(buildOpenchainRESTRouter())
+ defer httpServer.Close()
+
+ body := performHTTPGet(t, httpServer.URL+"/chain")
+ res := parseRESTResult(t, body)
+ if res.Error == "" {
+ t.Errorf("Expected an error when retrieving empty blockchain, but got none")
+ }
+
+ // add 3 blocks to the ledger
+ buildTestLedger1(ledger, t)
+
+ body3 := performHTTPGet(t, httpServer.URL+"/chain")
+ var blockchainInfo3 protos.BlockchainInfo
+ err := json.Unmarshal(body3, &blockchainInfo3)
+ if err != nil {
+ t.Fatalf("Invalid JSON response: %v", err)
+ }
+ if blockchainInfo3.Height != 3 {
+ t.Errorf("Expected blockchain height to be 3 but got %v", blockchainInfo3.Height)
+ }
+
+ // add 5 more blocks to the ledger
+ buildTestLedger2(ledger, t)
+
+ body8 := performHTTPGet(t, httpServer.URL+"/chain")
+ var blockchainInfo8 protos.BlockchainInfo
+ err = json.Unmarshal(body8, &blockchainInfo8)
+ if err != nil {
+ t.Fatalf("Invalid JSON response: %v", err)
+ }
+ if blockchainInfo8.Height != 8 {
+ t.Errorf("Expected blockchain height to be 8 but got %v", blockchainInfo8.Height)
+ }
+}
+
+func TestServerOpenchainREST_API_GetBlockByNumber(t *testing.T) {
+ // Construct a ledger with 0 blocks.
+ ledger := ledger.InitTestLedger(t)
+
+ initGlobalServerOpenchain(t)
+
+ // Start the HTTP REST test server
+ httpServer := httptest.NewServer(buildOpenchainRESTRouter())
+ defer httpServer.Close()
+
+ body := performHTTPGet(t, httpServer.URL+"/chain/blocks/0")
+ res := parseRESTResult(t, body)
+ if res.Error == "" {
+ t.Errorf("Expected an error when retrieving block 0 of an empty blockchain, but got none")
+ }
+
+ // add 3 blocks to the ledger
+ buildTestLedger1(ledger, t)
+
+ // Retrieve the first block from the blockchain (block number = 0)
+ body0 := performHTTPGet(t, httpServer.URL+"/chain/blocks/0")
+ var block0 protos.Block
+ err := json.Unmarshal(body0, &block0)
+ if err != nil {
+ t.Fatalf("Invalid JSON response: %v", err)
+ }
+
+ // Retrieve the 3rd block from the blockchain (block number = 2)
+ body2 := performHTTPGet(t, httpServer.URL+"/chain/blocks/2")
+ var block2 protos.Block
+ err = json.Unmarshal(body2, &block2)
+ if err != nil {
+ t.Fatalf("Invalid JSON response: %v", err)
+ }
+ if len(block2.Transactions) != 2 {
+ t.Errorf("Expected block to contain 2 transactions but got %v", len(block2.Transactions))
+ }
+
+ // Retrieve the 5th block from the blockchain (block number = 4), which
+ // should fail because the ledger has only 3 blocks.
+ body4 := performHTTPGet(t, httpServer.URL+"/chain/blocks/4")
+ res4 := parseRESTResult(t, body4)
+ if res4.Error == "" {
+ t.Errorf("Expected an error when retrieving non-existing block, but got none")
+ }
+
+ // Illegal block number
+ body = performHTTPGet(t, httpServer.URL+"/chain/blocks/NOT_A_NUMBER")
+ res = parseRESTResult(t, body)
+ if res.Error == "" {
+ t.Errorf("Expected an error when URL doesn't have a number, but got none")
+ }
+
+ // Add a fake block number 9 and try to fetch non-existing block 6
+ ledger.PutRawBlock(&block0, 9)
+ body = performHTTPGet(t, httpServer.URL+"/chain/blocks/6")
+ res = parseRESTResult(t, body)
+ if res.Error == "" {
+ t.Errorf("Expected an error when block doesn't exist, but got none")
+ }
+}
+
+func TestServerOpenchainREST_API_GetTransactionByUUID(t *testing.T) {
+ startTime := time.Now().Unix()
+
+ // Construct a ledger with 3 blocks.
+ ledger := ledger.InitTestLedger(t)
+ buildTestLedger1(ledger, t)
+
+ initGlobalServerOpenchain(t)
+
+ // Start the HTTP REST test server
+ httpServer := httptest.NewServer(buildOpenchainRESTRouter())
+ defer httpServer.Close()
+
+ body := performHTTPGet(t, httpServer.URL+"/transactions/NON-EXISTING-UUID")
+ res := parseRESTResult(t, body)
+ if res.Error == "" {
+ t.Errorf("Expected an error when retrieving non-existing transaction, but got none")
+ }
+
+ block1, err := ledger.GetBlockByNumber(1)
+ if err != nil {
+ t.Fatalf("Can't fetch first block from ledger: %v", err)
+ }
+ firstTx := block1.Transactions[0]
+
+ body1 := performHTTPGet(t, httpServer.URL+"/transactions/"+firstTx.Uuid)
+ var tx1 protos.Transaction
+ err = json.Unmarshal(body1, &tx1)
+ if err != nil {
+ t.Fatalf("Invalid JSON response: %v", err)
+ }
+ if tx1.Uuid != firstTx.Uuid {
+ t.Errorf("Expected transaction uuid to be '%v' but got '%v'", firstTx.Uuid, tx1.Uuid)
+ }
+ if tx1.Timestamp.Seconds < startTime {
+ t.Errorf("Expected transaction timestamp (%v) to be after the start time (%v)", tx1.Timestamp.Seconds, startTime)
+ }
+
+ badBody := performHTTPGet(t, httpServer.URL+"/transactions/with-\"-chars-in-the-URL")
+ badRes := parseRESTResult(t, badBody)
+ if badRes.Error == "" {
+ t.Errorf("Expected a proper error when retrieving transaction with bad UUID")
+ }
+}
+
+func TestServerOpenchainREST_API_Register(t *testing.T) {
+ os.RemoveAll(getRESTFilePath())
+ initGlobalServerOpenchain(t)
+
+ // Start the HTTP REST test server
+ httpServer := httptest.NewServer(buildOpenchainRESTRouter())
+ defer httpServer.Close()
+
+ expectError := func(reqBody string, expectedStatus int) {
+ httpResponse, body := performHTTPPost(t, httpServer.URL+"/registrar", []byte(reqBody))
+ if httpResponse.StatusCode != expectedStatus {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", expectedStatus, httpResponse.StatusCode)
+ }
+ res := parseRESTResult(t, body)
+ if res.Error == "" {
+ t.Errorf("Expected a proper error when registering")
+ }
+ }
+
+ expectOK := func(reqBody string) {
+ httpResponse, body := performHTTPPost(t, httpServer.URL+"/registrar", []byte(reqBody))
+ if httpResponse.StatusCode != http.StatusOK {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusOK, httpResponse.StatusCode)
+ }
+ res := parseRESTResult(t, body)
+ if res.Error != "" {
+ t.Errorf("Expected no error but got: %v", res.Error)
+ }
+ }
+
+ expectError("", http.StatusBadRequest)
+ expectError("} invalid json ]", http.StatusBadRequest)
+ expectError(`{"enrollId":"user"}`, http.StatusBadRequest)
+ expectError(`{"enrollId":"user","enrollSecret":"wrong_password"}`, http.StatusUnauthorized)
+ expectOK(`{"enrollId":"user","enrollSecret":"password"}`)
+ expectOK(`{"enrollId":"user","enrollSecret":"password"}`) // Already logged-in
+}
+
+func TestServerOpenchainREST_API_GetEnrollmentID(t *testing.T) {
+ os.RemoveAll(getRESTFilePath())
+ initGlobalServerOpenchain(t)
+
+ // Start the HTTP REST test server
+ httpServer := httptest.NewServer(buildOpenchainRESTRouter())
+ defer httpServer.Close()
+
+ body := performHTTPGet(t, httpServer.URL+"/registrar/NON_EXISTING_USER")
+ res := parseRESTResult(t, body)
+ if res.Error != "User NON_EXISTING_USER must log in." {
+ t.Errorf("Expected an error when retrieving non-existing user, but got: %v", res.Error)
+ }
+
+ body = performHTTPGet(t, httpServer.URL+"/registrar/BAD-\"-CHARS")
+ res = parseRESTResult(t, body)
+ if res.Error != "Invalid enrollment ID parameter" {
+ t.Errorf("Expected an error when retrieving non-existing user, but got: %v", res.Error)
+ }
+
+ // Login
+ performHTTPPost(t, httpServer.URL+"/registrar", []byte(`{"enrollId":"myuser","enrollSecret":"password"}`))
+ body = performHTTPGet(t, httpServer.URL+"/registrar/myuser")
+ res = parseRESTResult(t, body)
+ if res.OK == "" || res.Error != "" {
+ t.Errorf("Expected no error when retrieving logged-in user, but got: %v", res.Error)
+ }
+}
+
+func TestServerOpenchainREST_API_DeleteEnrollmentID(t *testing.T) {
+ os.RemoveAll(getRESTFilePath())
+ initGlobalServerOpenchain(t)
+
+ // Start the HTTP REST test server
+ httpServer := httptest.NewServer(buildOpenchainRESTRouter())
+ defer httpServer.Close()
+
+ body := performHTTPDelete(t, httpServer.URL+"/registrar/NON_EXISTING_USER")
+ res := parseRESTResult(t, body)
+ if res.OK == "" || res.Error != "" {
+ t.Errorf("Expected no error when deleting non logged-in user, but got: %v", res.Error)
+ }
+
+ // Login
+ performHTTPPost(t, httpServer.URL+"/registrar", []byte(`{"enrollId":"myuser","enrollSecret":"password"}`))
+ body = performHTTPDelete(t, httpServer.URL+"/registrar/myuser")
+ res = parseRESTResult(t, body)
+ if res.OK == "" || res.Error != "" {
+ t.Errorf("Expected no error when deleting a logged-in user, but got: %v", res.Error)
+ }
+}
+
+func TestServerOpenchainREST_API_GetEnrollmentCert(t *testing.T) {
+ os.RemoveAll(getRESTFilePath())
+ initGlobalServerOpenchain(t)
+
+ // Start the HTTP REST test server
+ httpServer := httptest.NewServer(buildOpenchainRESTRouter())
+ defer httpServer.Close()
+
+ body := performHTTPGet(t, httpServer.URL+"/registrar/NON_EXISTING_USER/ecert")
+ res := parseRESTResult(t, body)
+ if res.Error == "" {
+ t.Errorf("Expected an error when retrieving non-existing user, but got none")
+ }
+
+ body = performHTTPGet(t, httpServer.URL+"/registrar/BAD-\"-CHARS/ecert")
+ res = parseRESTResult(t, body)
+ if res.Error == "" {
+ t.Errorf("Expected an error when retrieving non-existing user, but got none")
+ }
+}
+
+func TestServerOpenchainREST_API_GetTransactionCert(t *testing.T) {
+ os.RemoveAll(getRESTFilePath())
+ initGlobalServerOpenchain(t)
+
+ // Start the HTTP REST test server
+ httpServer := httptest.NewServer(buildOpenchainRESTRouter())
+ defer httpServer.Close()
+
+ body := performHTTPGet(t, httpServer.URL+"/registrar/NON_EXISTING_USER/tcert")
+ res := parseRESTResult(t, body)
+ if res.Error == "" {
+ t.Errorf("Expected an error when retrieving non-existing user, but got none")
+ }
+
+ body = performHTTPGet(t, httpServer.URL+"/registrar/BAD-\"-CHARS/tcert")
+ res = parseRESTResult(t, body)
+ if res.Error == "" {
+ t.Errorf("Expected an error when retrieving non-existing user, but got none")
+ }
+}
+
+func TestServerOpenchainREST_API_GetPeers(t *testing.T) {
+ initGlobalServerOpenchain(t)
+
+ // Start the HTTP REST test server
+ httpServer := httptest.NewServer(buildOpenchainRESTRouter())
+ defer httpServer.Close()
+
+ body := performHTTPGet(t, httpServer.URL+"/network/peers")
+ var msg protos.PeersMessage
+ err := json.Unmarshal(body, &msg)
+ if err != nil {
+ t.Fatalf("Invalid JSON response: %v", err)
+ }
+ if len(msg.Peers) != 1 {
+ t.Errorf("Expected a list of 1 peer but got %d peers", len(msg.Peers))
+ }
+ if msg.Peers[0].ID.Name != "jdoe" {
+ t.Errorf("Expected a 'jdoe' peer but got '%s'", msg.Peers[0].ID.Name)
+ }
+}
+
+func TestServerOpenchainREST_API_Chaincode_InvalidRequests(t *testing.T) {
+ // Construct a ledger with 3 blocks.
+ ledger := ledger.InitTestLedger(t)
+ buildTestLedger1(ledger, t)
+
+ initGlobalServerOpenchain(t)
+
+ // Start the HTTP REST test server
+ httpServer := httptest.NewServer(buildOpenchainRESTRouter())
+ defer httpServer.Close()
+
+ // Test empty POST payload
+ httpResponse, body := performHTTPPost(t, httpServer.URL+"/chaincode", []byte{})
+ if httpResponse.StatusCode != http.StatusBadRequest {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusBadRequest, httpResponse.StatusCode)
+ }
+ res := parseRPCResponse(t, body)
+ if res.Error == nil || res.Error.Code != InvalidRequest.Code {
+ t.Errorf("Expected an error when sending empty payload, but got %#v", res.Error)
+ }
+
+ // Test invalid POST payload
+ httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte("{,,,"))
+ if httpResponse.StatusCode != http.StatusBadRequest {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusBadRequest, httpResponse.StatusCode)
+ }
+ res = parseRPCResponse(t, body)
+ if res.Error == nil || res.Error.Code != ParseError.Code {
+ t.Errorf("Expected an error when sending invalid JSON payload, but got %#v", res.Error)
+ }
+
+ // Test request without ID (=notification) results in no response
+ httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0"}`))
+ if httpResponse.StatusCode != http.StatusOK {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusOK, httpResponse.StatusCode)
+ }
+ if len(body) != 0 {
+ t.Errorf("Expected an empty response body to notification, but got %#v", string(body))
+ }
+
+ // Test missing JSON RPC version
+ httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"ID":123}`))
+ if httpResponse.StatusCode != http.StatusBadRequest {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusBadRequest, httpResponse.StatusCode)
+ }
+ res = parseRPCResponse(t, body)
+ if res.Error == nil || res.Error.Code != InvalidRequest.Code {
+ t.Errorf("Expected an error when sending missing jsonrpc version, but got %#v", res.Error)
+ }
+
+ // Test illegal JSON RPC version
+ httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"0.0","ID":123}`))
+ if httpResponse.StatusCode != http.StatusBadRequest {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusBadRequest, httpResponse.StatusCode)
+ }
+ res = parseRPCResponse(t, body)
+ if res.Error == nil || res.Error.Code != InvalidRequest.Code {
+ t.Errorf("Expected an error when sending illegal jsonrpc version, but got %#v", res.Error)
+ }
+
+ // Test missing JSON RPC method
+ httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123}`))
+ if httpResponse.StatusCode != http.StatusBadRequest {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusBadRequest, httpResponse.StatusCode)
+ }
+ res = parseRPCResponse(t, body)
+ if res.Error == nil || res.Error.Code != InvalidRequest.Code {
+ t.Errorf("Expected an error when sending missing jsonrpc method, but got %#v", res.Error)
+ }
+
+ // Test illegal JSON RPC method
+ httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"non_existing"}`))
+ if httpResponse.StatusCode != http.StatusNotFound {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusNotFound, httpResponse.StatusCode)
+ }
+ res = parseRPCResponse(t, body)
+ if res.Error == nil || res.Error.Code != MethodNotFound.Code {
+ t.Errorf("Expected an error when sending illegal jsonrpc method, but got %#v", res.Error)
+ }
+
+}
+
+func TestServerOpenchainREST_API_Chaincode_Deploy(t *testing.T) {
+ // Construct a ledger with 3 blocks.
+ ledger := ledger.InitTestLedger(t)
+ buildTestLedger1(ledger, t)
+
+ initGlobalServerOpenchain(t)
+
+ // Start the HTTP REST test server
+ httpServer := httptest.NewServer(buildOpenchainRESTRouter())
+ defer httpServer.Close()
+
+ // Test deploy without params
+ httpResponse, body := performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"deploy"}`))
+ if httpResponse.StatusCode != http.StatusBadRequest {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusBadRequest, httpResponse.StatusCode)
+ }
+ res := parseRPCResponse(t, body)
+ if res.Error == nil || res.Error.Code != InvalidParams.Code {
+ t.Errorf("Expected an error when sending missing params, but got %#v", res.Error)
+ }
+
+ // Login
+ performHTTPPost(t, httpServer.URL+"/registrar", []byte(`{"enrollId":"myuser","enrollSecret":"password"}`))
+
+ // Test deploy with invalid chaincode path
+ requestBody := `{
+ "jsonrpc": "2.0",
+ "ID": 123,
+ "method": "deploy",
+ "params": {
+ "type": 1,
+ "chaincodeID": {
+ "path": "non-existing"
+ },
+ "ctorMsg": {
+ "function": "Init",
+ "args": []
+ },
+ "secureContext": "myuser"
+ }
+ }`
+ httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(requestBody))
+ if httpResponse.StatusCode != http.StatusOK {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusOK, httpResponse.StatusCode)
+ }
+ res = parseRPCResponse(t, body)
+ if res.Error == nil || res.Error.Code != ChaincodeDeployError.Code {
+ t.Errorf("Expected an error when sending non-existing chaincode path, but got %#v", res.Error)
+ }
+
+ // Test deploy without username
+ requestBody = `{
+ "jsonrpc": "2.0",
+ "ID": 123,
+ "method": "deploy",
+ "params": {
+ "type": 1,
+ "chaincodeID": {
+ "path": "github.com/hyperledger/fabric/core/rest/test_chaincode"
+ },
+ "ctorMsg": {
+ "function": "Init",
+ "args": []
+ }
+ }
+ }`
+ httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(requestBody))
+ res = parseRPCResponse(t, body)
+ if res.Error == nil || res.Error.Code != InvalidParams.Code {
+ t.Errorf("Expected an error when sending without username, but got %#v", res.Error)
+ }
+
+ // Test deploy with real chaincode path
+ requestBody = `{
+ "jsonrpc": "2.0",
+ "ID": 123,
+ "method": "deploy",
+ "params": {
+ "type": 1,
+ "chaincodeID": {
+ "path": "github.com/hyperledger/fabric/core/rest/test_chaincode"
+ },
+ "ctorMsg": {
+ "function": "Init",
+ "args": []
+ },
+ "secureContext": "myuser"
+ }
+ }`
+ httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(requestBody))
+ if httpResponse.StatusCode != http.StatusOK {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusOK, httpResponse.StatusCode)
+ }
+ res = parseRPCResponse(t, body)
+ if res.Error != nil {
+ t.Errorf("Expected success but got %#v", res.Error)
+ }
+ if res.Result.Status != "OK" {
+ t.Errorf("Expected OK but got %#v", res.Result.Status)
+ }
+ if res.Result.Message != "new_name_for_deployed_chaincode" {
+ t.Errorf("Expected 'new_name_for_deployed_chaincode' but got '%#v'", res.Result.Message)
+ }
+}
+
+func TestServerOpenchainREST_API_Chaincode_Invoke(t *testing.T) {
+ // Construct a ledger with 3 blocks.
+ ledger := ledger.InitTestLedger(t)
+ buildTestLedger1(ledger, t)
+
+ initGlobalServerOpenchain(t)
+
+ // Start the HTTP REST test server
+ httpServer := httptest.NewServer(buildOpenchainRESTRouter())
+ defer httpServer.Close()
+
+ // Test invoke without params
+ httpResponse, body := performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"invoke"}`))
+ if httpResponse.StatusCode != http.StatusBadRequest {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusBadRequest, httpResponse.StatusCode)
+ }
+ res := parseRPCResponse(t, body)
+ if res.Error == nil || res.Error.Code != InvalidParams.Code {
+ t.Errorf("Expected an error when sending missing params, but got %#v", res.Error)
+ }
+
+ // Login
+ performHTTPPost(t, httpServer.URL+"/registrar", []byte(`{"enrollId":"myuser","enrollSecret":"password"}`))
+
+ // Test invoke with "fail" function
+ httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"invoke","params":{"type":1,"chaincodeID":{"name":"dummy"},"ctorMsg":{"function":"fail","args":[]},"secureContext":"myuser"}}`))
+ if httpResponse.StatusCode != http.StatusOK {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusOK, httpResponse.StatusCode)
+ }
+ res = parseRPCResponse(t, body)
+ if res.Error == nil || res.Error.Code != ChaincodeInvokeError.Code {
+ t.Errorf("Expected an error when sending non-existing chaincode path, but got %#v", res.Error)
+ }
+
+ // Test invoke with "change_owner" function
+ httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"invoke","params":{"type":1,"chaincodeID":{"name":"dummy"},"ctorMsg":{"function":"change_owner","args":[]},"secureContext":"myuser"}}`))
+ if httpResponse.StatusCode != http.StatusOK {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusOK, httpResponse.StatusCode)
+ }
+ res = parseRPCResponse(t, body)
+ if res.Error != nil {
+ t.Errorf("Expected success but got %#v", res.Error)
+ }
+ if res.Result.Status != "OK" {
+ t.Errorf("Expected OK but got %#v", res.Result.Status)
+ }
+ if res.Result.Message != "change_owner_invoke_result" {
+ t.Errorf("Expected 'change_owner_invoke_result' but got '%v'", res.Result.Message)
+ }
+}
+
+func TestServerOpenchainREST_API_Chaincode_Query(t *testing.T) {
+ // Construct a ledger with 3 blocks.
+ ledger := ledger.InitTestLedger(t)
+ buildTestLedger1(ledger, t)
+
+ initGlobalServerOpenchain(t)
+
+ // Start the HTTP REST test server
+ httpServer := httptest.NewServer(buildOpenchainRESTRouter())
+ defer httpServer.Close()
+
+ // Test query without params
+ httpResponse, body := performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"query"}`))
+ if httpResponse.StatusCode != http.StatusBadRequest {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusBadRequest, httpResponse.StatusCode)
+ }
+ res := parseRPCResponse(t, body)
+ if res.Error == nil || res.Error.Code != InvalidParams.Code {
+ t.Errorf("Expected an error when sending missing params, but got %#v", res.Error)
+ }
+
+ // Login
+ performHTTPPost(t, httpServer.URL+"/registrar", []byte(`{"enrollId":"myuser","enrollSecret":"password"}`))
+
+ // Test query with non-existing chaincode name
+ httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"query","params":{"type":1,"chaincodeID":{"name":"non-existing"},"ctorMsg":{"function":"Init","args":[]},"secureContext":"myuser"}}`))
+ if httpResponse.StatusCode != http.StatusOK {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusOK, httpResponse.StatusCode)
+ }
+ res = parseRPCResponse(t, body)
+ if res.Error == nil || res.Error.Code != ChaincodeQueryError.Code {
+ t.Errorf("Expected an error when sending non-existing chaincode path, but got %#v", res.Error)
+ }
+
+ // Test query with fail function
+ httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"query","params":{"type":1,"chaincodeID":{"name":"dummy"},"ctorMsg":{"function":"fail","args":[]},"secureContext":"myuser"}}`))
+ if httpResponse.StatusCode != http.StatusOK {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusOK, httpResponse.StatusCode)
+ }
+ res = parseRPCResponse(t, body)
+ if res.Error == nil || res.Error.Code != ChaincodeQueryError.Code {
+ t.Errorf("Expected an error when chaincode query fails, but got %#v", res.Error)
+ }
+ if res.Error.Data != "Error when querying chaincode: Query failure with special-\" chars" {
+ t.Errorf("Expected an error message when chaincode query fails, but got %#v", res.Error.Data)
+ }
+
+ // Test query with get_owner function
+ httpResponse, body = performHTTPPost(t, httpServer.URL+"/chaincode", []byte(`{"jsonrpc":"2.0","ID":123,"method":"query","params":{"type":1,"chaincodeID":{"name":"dummy"},"ctorMsg":{"function":"get_owner","args":[]},"secureContext":"myuser"}}`))
+ if httpResponse.StatusCode != http.StatusOK {
+ t.Errorf("Expected an HTTP status code %#v but got %#v", http.StatusOK, httpResponse.StatusCode)
+ }
+ res = parseRPCResponse(t, body)
+ if res.Error != nil {
+ t.Errorf("Expected success but got %#v", res.Error)
+ }
+ if res.Result.Status != "OK" {
+ t.Errorf("Expected OK but got %#v", res.Result.Status)
+ }
+ if res.Result.Message != "get_owner_query_result" {
+ t.Errorf("Expected 'get_owner_query_result' but got '%v'", res.Result.Message)
+ }
+}
+
+func TestServerOpenchainREST_API_NotFound(t *testing.T) {
+ httpServer := httptest.NewServer(buildOpenchainRESTRouter())
+ defer httpServer.Close()
+ body := performHTTPGet(t, httpServer.URL+"/non-existing")
+ res := parseRESTResult(t, body)
+ if res.Error != "Openchain endpoint not found." {
+ t.Errorf("Expected an error when accessing non-existing endpoint, but got %#v", res.Error)
+ }
+}
diff --git a/core/rest/rest_test.yaml b/core/rest/rest_test.yaml
new file mode 100644
index 00000000000..4acbe820fda
--- /dev/null
+++ b/core/rest/rest_test.yaml
@@ -0,0 +1,472 @@
+###############################################################################
+#
+# CLI section
+#
+###############################################################################
+cli:
+
+ # The address that the cli process will use for callbacks from chaincodes
+ address: 0.0.0.0:30304
+
+
+
+###############################################################################
+#
+# REST section
+#
+###############################################################################
+rest:
+
+ # Enable/disable setting for the REST service. It is recommended to disable
+ # REST service on validators in production deployment and use non-validating
+ # nodes to host REST service
+ enabled: true
+
+ # The address that the REST service will listen on for incoming requests.
+ address: 0.0.0.0:5000
+
+ validPatterns:
+
+ # Valid enrollment ID pattern in URLs: At least one character long, and
+ # all characters are A-Z, a-z, 0-9 or _.
+ enrollmentID: '^\w+$'
+
+###############################################################################
+#
+# LOGGING section
+#
+###############################################################################
+logging:
+
+ # Valid logging levels are case-insensitive strings chosen from
+
+ # CRITICAL | ERROR | WARNING | NOTICE | INFO | DEBUG
+
+ # Logging 'module' names are also strings, however valid module names are
+ # defined at runtime and are not checked for validity during option
+ # processing.
+
+ # Default logging levels are specified here for each of the peer
+ # commands. For commands that have subcommands, the defaults also apply to
+ # all subcommands of the command. These logging levels can be overridden
+ # on the command line using the --logging-level command-line option, or by
+ # setting the CORE_LOGGING_LEVEL environment variable.
+
+ # The logging level specification is of the form
+
+ # [<module>[,<module>...]=]<level>[:[<module>[,<module>...]=]<level>...]
+
+ # A logging level by itself is taken as the overall default. Otherwise,
+ # overrides for individual or groups of modules can be specified using the
+ # <module>[,<module>...]=<level> syntax.
+
+ # Examples:
+ # info - Set default to INFO
+ # warning:main,db=debug:chaincode=info - Override default WARNING in main,db,chaincode
+ # chaincode=info:main=debug:db=debug:warning - Same as above
+ peer: debug
+ crypto: info
+ status: warning
+ stop: warning
+ login: warning
+ vm: warning
+ chaincode: warning
+
+
+###############################################################################
+#
+# Peer section
+#
+###############################################################################
+peer:
+
+ # Peer Version following version semantics as described here http://semver.org/
+ # The Peer supplies this version in communications with other Peers
+ version: 0.1.0
+
+ # The Peer id is used for identifying this Peer instance.
+ id: jdoe
+
+ # The privateKey to be used by this peer
+ # privateKey: 794ef087680e2494fa4918fd8fb80fb284b50b57d321a31423fe42b9ccf6216047cea0b66fe8365a8e3f2a8140c6866cc45852e63124668bee1daa9c97da0c2a
+
+ # The networkId allows for logical separation of networks
+ # networkId: dev
+ # networkId: test
+ networkId: dev
+
+ # The Address this Peer will listen on
+ listenAddress: 0.0.0.0:30303
+ # The Address this Peer will bind to for providing services
+ address: 0.0.0.0:30303
+ # Whether the Peer should programmatically determine the address to bind to.
+ # This case is useful for docker containers.
+ addressAutoDetect: false
+
+ # Setting for runtime.GOMAXPROCS(n). If n < 1, it does not change the current setting
+ gomaxprocs: -1
+ workers: 2
+
+ # Sync related configuration
+ sync:
+ blocks:
+ # Channel size for readonly SyncBlocks messages channel for receiving
+ # blocks from opposite Peer Endpoints.
+ # NOTE: currently messages are not stored and forwarded, but rather
+ # lost if the channel write blocks.
+ channelSize: 10
+ state:
+ snapshot:
+ # Channel size for readonly syncStateSnapshot messages channel
+ # for receiving state deltas for snapshot from opposite Peer Endpoints.
+ # NOTE: currently messages are not stored and forwarded, but
+ # rather lost if the channel write blocks.
+ channelSize: 50
+ deltas:
+ # Channel size for readonly syncStateDeltas messages channel for
+ # receiving state deltas for a syncBlockRange from opposite
+ # Peer Endpoints.
+ # NOTE: currently messages are not stored and forwarded,
+ # but rather lost if the channel write blocks.
+ channelSize: 20
+
+ # Validator defines whether this peer is a validating peer or not, and if
+ # it is enabled, what consensus plugin to load
+ validator:
+ enabled: true
+
+ consensus:
+ # Consensus plugin to use. The value is the name of the plugin, e.g. pbft, noops ( this value is case-insensitive)
+ # if the given value is not recognized, we will default to noops
+ plugin: noops
+
+ # total number of consensus messages which will be buffered per connection before delivery is rejected
+ buffersize: 1000
+
+ events:
+ # The address that the Event service will be enabled on the validator
+ address: 0.0.0.0:31315
+
+ # total number of events that could be buffered without blocking the
+ # validator sends
+ buffersize: 100
+
+ # milliseconds timeout for producer to send an event.
+ # if < 0, if buffer full, unblocks immediately and not send
+ # if 0, if buffer full, will block and guarantee the event will be sent out
+ # if > 0, if buffer full, blocks till timeout
+ timeout: 10
+
+ # TLS Settings for p2p communications
+ tls:
+ enabled: false
+ cert:
+ file: testdata/server1.pem
+ key:
+ file: testdata/server1.key
+ # The server name use to verify the hostname returned by TLS handshake
+ serverhostoverride:
+
+ # PKI member services properties
+ pki:
+ eca:
+ paddr: localhost:50051
+ tca:
+ paddr: localhost:50051
+ tlsca:
+ paddr: localhost:50051
+ tls:
+ enabled: false
+ rootcert:
+ file: tlsca.cert
+ # The server name use to verify the hostname returned by TLS handshake
+ serverhostoverride:
+
+ # Peer discovery settings. Controls how this peer discovers other peers
+ discovery:
+
+ # The root nodes are used for bootstrapping purposes, and generally
+ # supplied through ENV variables
+ # It can be either a single host or a comma separated list of hosts.
+ rootnode:
+
+ # The duration of time between attempts to ask peers for their connected peers
+ period: 5s
+
+ ## leaving this in for example of sub map entry
+ # testNodes:
+ # - node : 1
+ # ip : 127.0.0.1
+ # port : 30303
+ # - node : 2
+ # ip : 127.0.0.1
+ # port : 30303
+
+ # Should the discovered nodes and their reputations
+ # be stored in DB and persisted between restarts
+ persist: true
+
+ # if peer discovery is off
+ # the peer window will show
+ # only what was retrieved by the
+ # active peer [true/false]
+ enabled: true
+
+ # number of workers that
+ # test the peers for being
+ # online [1..10]
+ workers: 8
+
+ # the period in seconds with which the discovery
+ # tries to reconnect to successful nodes
+ # 0 means the nodes are not reconnected
+ touchPeriod: 6s
+
+ # the maximum number of nodes to reconnect to
+ # -1 for unlimited
+ touchMaxNodes: 100
+
+ # Path on the file system where peer will store data
+ fileSystemPath: /var/hyperledger/test/rest_test
+
+
+ profile:
+ enabled: false
+ listenAddress: 0.0.0.0:6060
+
+###############################################################################
+#
+# VM section
+#
+###############################################################################
+vm:
+
+ # Endpoint of the vm management system. For docker can be one of the following in general
+ # unix:///var/run/docker.sock
+ # http://localhost:2375
+ # https://localhost:2376
+ endpoint: unix:///var/run/docker.sock
+
+ # settings for docker vms
+ docker:
+ tls:
+ enabled: false
+ cert:
+ file: /path/to/server.pem
+ ca:
+ file: /path/to/ca.pem
+ key:
+ file: /path/to/server-key.pem
+ # Parameters for docker container creation. Docker containers can be created with custom parameters.
+ # If you have your own IPAM & DNS server for the cluster, you can use them to create containers efficiently.
+ # NetworkMode Sets the networking mode for the container. Supported standard values are: `host`(default),`bridge`,`ipvlan`,`none`
+ # dns A list of DNS servers for the container to use.
+ # note: not support customize for `Privileged`
+ # LogConfig sets the logging driver (Type) and related options (Config) for Docker
+ # you can refer to https://docs.docker.com/engine/admin/logging/overview/ for more detailed configuration.
+ hostConfig:
+ NetworkMode: host
+ Dns:
+ # - 192.168.0.1
+ LogConfig:
+ Type: json-file
+ Config:
+ max-size: "50m"
+ max-file: "5"
+
+###############################################################################
+#
+# Chaincode section
+#
+###############################################################################
+chaincode:
+
+ # The id is used by the Chaincode stub to register the executing Chaincode
+ # ID with the Peer and is generally supplied through ENV variables
+ # the Path form of ID is provided when deploying the chaincode. The name is
+ # used for all other requests. The name is really a hashcode
+ # returned by the system in response to the deploy transaction. In
+ # development mode where user runs the chaincode, the name can be any string
+ id:
+ path:
+ name:
+
+ golang:
+
+ # This is the basis for the Golang Dockerfile. Additional commands will
+ # be appended dependent upon the chaincode specification.
+ Dockerfile: |
+ from hyperledger/fabric-baseimage
+ #from utxo:0.1.0
+ COPY src $GOPATH/src
+ WORKDIR $GOPATH
+
+ car:
+
+ # This is the basis for the CAR Dockerfile. Additional commands will
+ # be appended dependent upon the chaincode specification.
+ Dockerfile: |
+ FROM hyperledger/fabric-ccenv
+
+ # timeout in millisecs for starting up a container and waiting for Register
+ # to come through. 1sec should be plenty for chaincode unit tests
+ startuptimeout: 1000
+
+ #timeout in millisecs for deploying chaincode from a remote repository.
+ deploytimeout: 30000
+
+ #mode - options are "dev", "net"
+ #dev - in dev mode, user runs the chaincode after starting validator from
+ # command line on local machine
+ #net - in net mode validator will run chaincode in a docker container
+
+ mode: net
+ # typically installpath should not be modified. Otherwise, user must ensure
+ # the chaincode executable is placed in the path specified by installpath in
+ # the image
+ installpath: /opt/gopath/bin/
+
+ # keepalive in seconds. In situations where the communication goes through a
+ # proxy that does not support keep-alive, this parameter will maintain connection
+ # between peer and chaincode.
+ # A value <= 0 turns keepalive off
+ keepalive: 0
+
+###############################################################################
+#
+###############################################################################
+#
+ # Ledger section - ledger configuration encompasses both the blockchain
+# and the state
+#
+###############################################################################
+ledger:
+
+ blockchain:
+
+ # Define the genesis block
+ genesisBlock:
+
+ state:
+
+ # Control the number of state deltas that are maintained. This takes additional
+ # disk space, but allows the state to be rolled backwards and forwards
+ # without the need to replay transactions.
+ deltaHistorySize: 500
+
+ # The data structure in which the state will be stored. Different data
+ # structures may offer different performance characteristics.
+ # Options are 'buckettree', 'trie' and 'raw'.
+ # ( Note:'raw' is experimental and incomplete. )
+ # If not set, the default data structure is the 'buckettree'.
+ # This CANNOT be changed after the DB has been created.
+ dataStructure:
+ # The name of the data structure is for storing the state
+ name: buckettree
+ # The data structure specific configurations
+ configs:
+ # configurations for 'buckettree'. These CANNOT be changed after the DB
+ # has been created. 'numBuckets' defines the number of bins that the
+ # state key-values are to be divided
+ numBuckets: 1000003
+ # 'maxGroupingAtEachLevel' defines the number of bins that are grouped
+ # together to construct the next level of the merkle-tree (this is applied
+ # repeatedly for constructing the entire tree).
+ maxGroupingAtEachLevel: 5
+ # 'bucketCacheSize' defines the size (in MBs) of the cache that is used to keep
+ # the buckets (from the root up to the second-last level) in memory. This cache helps
+ # in making state hash computation faster. A value less than or equal to zero
+ # leads to disabling this caching. This caching helps more if transactions
+ # perform significant writes.
+ bucketCacheSize: 100
+
+ # configurations for 'trie'
+ # 'trie' has no additional configurations exposed as yet
+
+
+###############################################################################
+#
+# Security section - Applied to all entities (client, NVP, VP)
+#
+###############################################################################
+security:
+ # Enable security will force every entity on the network to enroll with obc-ca
+ # and maintain a valid set of certificates in order to communicate with
+ # other peers
+ enabled: true
+ # To enroll NVP or VP with membersrvc. These parameters are for 1 time use.
+ # They will not be valid on subsequent times without un-enroll first.
+ # The values come from off-line registration with obc-ca. For testing, make
+ # sure the values are in membersrvc/membersrvc.yaml file eca.users
+ enrollID: vp
+ enrollSecret: f3489fy98ghf
+ # To enable privacy of transactions (requires security to be enabled). This
+ # encrypts the transaction content during transit and at rest. The state
+ # data is also encrypted
+ privacy: false
+
+ # Can be 256 or 384. If you change here, you have to change also
+ # the same property in membersrvc.yaml to the same value
+ level: 256
+
+ # Can be SHA2 or SHA3. If you change here, you have to change also
+ # the same property in membersrvc.yaml to the same value
+ hashAlgorithm: SHA3
+
+ # TCerts related configuration
+ tcert:
+ batch:
+ # The size of the batch of TCerts
+ size: 200
+ # Enable the release of keys needed to decrypt attributes from TCerts in
+ # the chaincode using the metadata field of the transaction (requires
+ # security to be enabled).
+ attributes:
+ enabled: false
+ multithreading:
+ enabled: false
+
+ # Confidentiality protocol versions supported: 1.2
+ confidentialityProtocolVersion: 1.2
+
+################################################################################
+#
+# SECTION: STATETRANSFER
+#
+# - This applies to recovery behavior when the replica has detected
+# a state transfer is required
+#
+# - This might happen:
+# - During a view change in response to a faulty primary
+# - After a network outage which has isolated the replica
+# - If the current blockchain/state is determined to be corrupt
+#
+################################################################################
+statetransfer:
+
+ # Should a replica attempt to fix damaged blocks?
+ # In general, this should be set to true; setting it to false will cause
+ # the replica to panic and require a human to intervene
+ # and fix the corruption
+ recoverdamage: true
+
+ # The number of blocks to retrieve per sync request
+ blocksperrequest: 20
+
+ # The maximum number of state deltas to attempt to retrieve
+ # If more than this number of deltas is required to play the state up to date
+ # then instead the state will be flagged as invalid, and a full copy of the state
+ # will be retrieved instead
+ maxdeltas: 200
+
+ # Timeouts
+ timeout:
+
+ # How long may returning a single block take
+ singleblock: 2s
+
+ # How long may returning a single state delta take
+ singlestatedelta: 2s
+
+ # How long may transferring the complete state take
+ fullstate: 60s
diff --git a/core/rest/rest_util.go b/core/rest/rest_util.go
new file mode 100644
index 00000000000..ce8fb4f3148
--- /dev/null
+++ b/core/rest/rest_util.go
@@ -0,0 +1,56 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import "encoding/json"
+
+// isJSON is a helper function to determine if a given string is proper JSON.
+func isJSON(s string) bool {
+ var js map[string]interface{}
+ return json.Unmarshal([]byte(s), &js) == nil
+}
+
+// formatRPCError formats the ERROR response to aid in JSON RPC 2.0 implementation
+func formatRPCError(code int64, msg string, data string) rpcResult {
+ err := &rpcError{Code: code, Message: msg, Data: data}
+ error := rpcResult{Status: "Error", Error: err}
+
+ return error
+}
+
+// formatRPCOK formats the OK response to aid in JSON RPC 2.0 implementation
+func formatRPCOK(msg string) rpcResult {
+ result := rpcResult{Status: "OK", Message: msg}
+
+ return result
+}
+
+// formatRPCResponse consumes either an RPC ERROR or OK rpcResult and formats it
+// in accordance with the JSON RPC 2.0 specification.
+func formatRPCResponse(res rpcResult, id *rpcID) rpcResponse {
+ var response rpcResponse
+
+ // Format a successful response
+ if res.Status == "OK" {
+ response = rpcResponse{Jsonrpc: "2.0", Result: &res, ID: id}
+ } else {
+ // Format an error response
+ response = rpcResponse{Jsonrpc: "2.0", Error: res.Error, ID: id}
+ }
+
+ return response
+}
diff --git a/core/system_chaincode/api/sysccapi.go b/core/system_chaincode/api/sysccapi.go
new file mode 100644
index 00000000000..6b98ec86ead
--- /dev/null
+++ b/core/system_chaincode/api/sysccapi.go
@@ -0,0 +1,119 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+ "fmt"
+
+ "golang.org/x/net/context"
+
+ "github.com/hyperledger/fabric/core/chaincode"
+ "github.com/hyperledger/fabric/core/chaincode/shim"
+ "github.com/hyperledger/fabric/core/container/inproccontroller"
+ "github.com/hyperledger/fabric/core/peer"
+ "github.com/hyperledger/fabric/protos"
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+var sysccLogger = logging.MustGetLogger("sysccapi")
+
+// SystemChaincode defines the metadata needed to initialize system chaincode
+// when the fabric comes up. SystemChaincodes are installed by adding an
+// entry in importsysccs.go
+type SystemChaincode struct {
+ // Enabled a convenient switch to enable/disable system chaincode without
+ // having to remove entry from importsysccs.go
+ Enabled bool
+
+ //Unique name of the system chaincode
+ Name string
+
+ //Path to the system chaincode; currently not used
+ Path string
+
+ //InitArgs initialization arguments to startup the system chaincode
+ InitArgs []string
+
+ // Chaincode is the actual chaincode object
+ Chaincode shim.Chaincode
+}
+
+// RegisterSysCC registers the given system chaincode with the peer
+func RegisterSysCC(syscc *SystemChaincode) error {
+ if peer.SecurityEnabled() {
+ sysccLogger.Warning(fmt.Sprintf("Currently system chaincode does support security(%s,%s)", syscc.Name, syscc.Path))
+ return nil
+ }
+ if !syscc.Enabled || !isWhitelisted(syscc) {
+ sysccLogger.Info(fmt.Sprintf("system chaincode (%s,%s) disabled", syscc.Name, syscc.Path))
+ return nil
+ }
+
+ err := inproccontroller.Register(syscc.Path, syscc.Chaincode)
+ if err != nil {
+ errStr := fmt.Sprintf("could not register (%s,%v): %s", syscc.Path, syscc, err)
+ sysccLogger.Error(errStr)
+ return fmt.Errorf(errStr)
+ }
+
+ chaincodeID := &protos.ChaincodeID{Path: syscc.Path, Name: syscc.Name}
+ spec := protos.ChaincodeSpec{Type: protos.ChaincodeSpec_Type(protos.ChaincodeSpec_Type_value["GOLANG"]), ChaincodeID: chaincodeID, CtorMsg: &protos.ChaincodeInput{Args: syscc.InitArgs}}
+
+ if deployErr := deploySysCC(context.Background(), &spec); deployErr != nil {
+ errStr := fmt.Sprintf("deploy chaincode failed: %s", deployErr)
+ sysccLogger.Error(errStr)
+ return fmt.Errorf(errStr)
+ }
+
+ sysccLogger.Info("system chaincode %s(%s) registered", syscc.Name, syscc.Path)
+ return err
+}
+
+// buildLocal builds a given chaincode code
+func buildSysCC(context context.Context, spec *protos.ChaincodeSpec) (*protos.ChaincodeDeploymentSpec, error) {
+ var codePackageBytes []byte
+ chaincodeDeploymentSpec := &protos.ChaincodeDeploymentSpec{ExecEnv: protos.ChaincodeDeploymentSpec_SYSTEM, ChaincodeSpec: spec, CodePackage: codePackageBytes}
+ return chaincodeDeploymentSpec, nil
+}
+
+// deployLocal deploys the supplied chaincode image to the local peer
+func deploySysCC(ctx context.Context, spec *protos.ChaincodeSpec) error {
+ // First build and get the deployment spec
+ chaincodeDeploymentSpec, err := buildSysCC(ctx, spec)
+
+ if err != nil {
+ sysccLogger.Error(fmt.Sprintf("Error deploying chaincode spec: %v\n\n error: %s", spec, err))
+ return err
+ }
+
+ transaction, err := protos.NewChaincodeDeployTransaction(chaincodeDeploymentSpec, chaincodeDeploymentSpec.ChaincodeSpec.ChaincodeID.Name)
+ if err != nil {
+ return fmt.Errorf("Error deploying chaincode: %s ", err)
+ }
+
+ _, _, err = chaincode.Execute(ctx, chaincode.GetChain(chaincode.DefaultChain), transaction)
+
+ return err
+}
+
+func isWhitelisted(syscc *SystemChaincode) bool {
+ chaincodes := viper.GetStringMapString("chaincode.system")
+ val, ok := chaincodes[syscc.Name]
+ enabled := val == "enable" || val == "true" || val == "yes"
+ return ok && enabled
+}
diff --git a/core/system_chaincode/config.go b/core/system_chaincode/config.go
new file mode 100644
index 00000000000..99966908706
--- /dev/null
+++ b/core/system_chaincode/config.go
@@ -0,0 +1,68 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package system_chaincode
+
+import (
+ "flag"
+ "fmt"
+ "strings"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+// Config the config wrapper structure
+type Config struct {
+}
+
+func init() {
+
+}
+
+// SetupTestLogging setup the logging during test execution
+func SetupTestLogging() {
+ level, err := logging.LogLevel(viper.GetString("logging.peer"))
+ if err == nil {
+ // No error, use the setting
+ logging.SetLevel(level, "main")
+ logging.SetLevel(level, "server")
+ logging.SetLevel(level, "peer")
+ } else {
+ logging.SetLevel(logging.ERROR, "main")
+ logging.SetLevel(logging.ERROR, "server")
+ logging.SetLevel(logging.ERROR, "peer")
+ }
+}
+
+// SetupTestConfig setup the config during test execution
+func SetupTestConfig() {
+ flag.Parse()
+
+ // Now set the configuration file
+ viper.SetEnvPrefix("CORE")
+ viper.AutomaticEnv()
+ replacer := strings.NewReplacer(".", "_")
+ viper.SetEnvKeyReplacer(replacer)
+ viper.SetConfigName("core") // name of config file (without extension)
+ viper.AddConfigPath("../../peer/") // path to look for the config file in
+ err := viper.ReadInConfig() // Find and read the config file
+ if err != nil { // Handle errors reading the config file
+ panic(fmt.Errorf("Fatal error config file: %s \n", err))
+ }
+
+ SetupTestLogging()
+}
diff --git a/core/system_chaincode/importsysccs.go b/core/system_chaincode/importsysccs.go
new file mode 100644
index 00000000000..06bca7e9a96
--- /dev/null
+++ b/core/system_chaincode/importsysccs.go
@@ -0,0 +1,41 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package system_chaincode
+
+import (
+ "github.com/hyperledger/fabric/core/system_chaincode/api"
+ //import system chain codes here
+ "github.com/hyperledger/fabric/bddtests/syschaincode/noop"
+)
+
+//see systemchaincode_test.go for an example using "sample_syscc"
+var systemChaincodes = []*api.SystemChaincode{
+ {
+ Enabled: true,
+ Name: "noop",
+ Path: "github.com/hyperledger/fabric/bddtests/syschaincode/noop",
+ InitArgs: []string{},
+ Chaincode: &noop.SystemChaincode{},
+ }}
+
+//RegisterSysCCs is the hook for system chaincodes where system chaincodes are registered with the fabric
+//note the chaincode must still be deployed and launched like a user chaincode will be
+func RegisterSysCCs() {
+ for _, sysCC := range systemChaincodes {
+ api.RegisterSysCC(sysCC)
+ }
+}
diff --git a/core/system_chaincode/samplesyscc/samplesyscc.go b/core/system_chaincode/samplesyscc/samplesyscc.go
new file mode 100644
index 00000000000..01ff2f1b1aa
--- /dev/null
+++ b/core/system_chaincode/samplesyscc/samplesyscc.go
@@ -0,0 +1,93 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package samplesyscc
+
+import (
+ "errors"
+
+ "github.com/hyperledger/fabric/core/chaincode/shim"
+)
+
+// SampleSysCC example simple Chaincode implementation
+type SampleSysCC struct {
+}
+
+// Init is deliberately a no-op for this sample system chaincode. (The body
+// ignores function and args and always returns (nil, nil); the original
+// comment claiming it stores a key/value pair did not match the code.)
+func (t *SampleSysCC) Init(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {
+ //as system chaincodes do not take part in consensus and are part of the system,
+ //best practice to do nothing (or very little) in Init.
+
+ return nil, nil
+}
+
+// Invoke stores args[1] under the key args[0] in the ledger state.
+// It returns (nil, error) when the argument count is wrong or when the
+// ledger read/write fails, and (nil, nil) on success.
+// NOTE(review): the GetState call only surfaces ledger read errors;
+// presumably a missing key returns no error, so the write below happens
+// whether or not the key already exists — confirm shim.GetState semantics.
+func (t *SampleSysCC) Invoke(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {
+ var key, val string // Entities
+
+ if len(args) != 2 {
+ return nil, errors.New("need 2 args (key and a value)")
+ }
+
+ // Initialize the chaincode
+ key = args[0]
+ val = args[1]
+
+ // Probe the ledger first so a read failure is reported before any write.
+ _, err := stub.GetState(key)
+ if err != nil {
+ // Error is wrapped as a JSON object so callers can parse it uniformly.
+ jsonResp := "{\"Error\":\"Failed to get val for " + key + "\"}"
+ return nil, errors.New(jsonResp)
+ }
+
+ // Write the state to the ledger
+ err = stub.PutState(key, []byte(val))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// Query callback representing the query of a chaincode.
+// Only the "getval" function is supported; args[0] is the key to look up.
+// Returns the raw stored bytes on success, or (nil, error) with a
+// JSON-formatted error message when the function name is unknown, the
+// argument count is wrong, the ledger read fails, or the key has no value.
+func (t *SampleSysCC) Query(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {
+ if function != "getval" {
+ return nil, errors.New("Invalid query function name. Expecting \"getval\"")
+ }
+ var key string // Entities
+ var err error
+
+ if len(args) != 1 {
+ return nil, errors.New("Incorrect number of arguments. Expecting key to query")
+ }
+
+ key = args[0]
+
+ // Get the state from the ledger
+ valbytes, err := stub.GetState(key)
+ if err != nil {
+ jsonResp := "{\"Error\":\"Failed to get state for " + key + "\"}"
+ return nil, errors.New(jsonResp)
+ }
+
+ // A nil value (key never written) is reported as an error rather than
+ // returned as an empty result.
+ if valbytes == nil {
+ jsonResp := "{\"Error\":\"Nil val for " + key + "\"}"
+ return nil, errors.New(jsonResp)
+ }
+
+ return valbytes, nil
+}
diff --git a/core/system_chaincode/systemchaincode_test.go b/core/system_chaincode/systemchaincode_test.go
new file mode 100644
index 00000000000..ddeee71a8bb
--- /dev/null
+++ b/core/system_chaincode/systemchaincode_test.go
@@ -0,0 +1,152 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package system_chaincode
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hyperledger/fabric/core/chaincode"
+ "github.com/hyperledger/fabric/core/ledger"
+ "github.com/hyperledger/fabric/core/system_chaincode/api"
+ "github.com/hyperledger/fabric/core/system_chaincode/samplesyscc"
+ "github.com/hyperledger/fabric/core/util"
+ pb "github.com/hyperledger/fabric/protos"
+ "github.com/spf13/viper"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+)
+
+// Invoke or query a chaincode.
+// invoke wraps spec in a ChaincodeInvocationSpec, builds a transaction of the
+// given type, and runs it through the chaincode framework. Queries execute
+// directly; invokes run inside a one-transaction ledger batch that is
+// committed only when execution succeeds. Returns the chaincode event, the
+// generated transaction uuid, the chaincode return value, and any execution
+// error.
+func invoke(ctx context.Context, spec *pb.ChaincodeSpec, typ pb.Transaction_Type) (*pb.ChaincodeEvent, string, []byte, error) {
+ chaincodeInvocationSpec := &pb.ChaincodeInvocationSpec{ChaincodeSpec: spec}
+
+ // Now create the Transactions message and send to Peer.
+ uuid := util.GenerateUUID()
+
+ var transaction *pb.Transaction
+ var err error
+ transaction, err = pb.NewChaincodeExecute(chaincodeInvocationSpec, uuid, typ)
+ if err != nil {
+ return nil, uuid, nil, fmt.Errorf("Error invoking chaincode: %s ", err)
+ }
+
+ var retval []byte
+ var execErr error
+ var ccevt *pb.ChaincodeEvent
+ if typ == pb.Transaction_CHAINCODE_QUERY {
+ retval, ccevt, execErr = chaincode.Execute(ctx, chaincode.GetChain(chaincode.DefaultChain), transaction)
+ } else {
+ ledger, _ := ledger.GetLedger()
+ ledger.BeginTxBatch("1")
+ retval, ccevt, execErr = chaincode.Execute(ctx, chaincode.GetChain(chaincode.DefaultChain), transaction)
+ // Bug fix: the original tested the stale 'err' from NewChaincodeExecute
+ // here (always nil at this point), so a failed execution still committed
+ // the batch. Check the actual execution error instead.
+ if execErr != nil {
+ return nil, uuid, nil, fmt.Errorf("Error invoking chaincode: %s ", execErr)
+ }
+ ledger.CommitTxBatch("1", []*pb.Transaction{transaction}, nil, nil)
+ }
+
+ return ccevt, uuid, retval, execErr
+}
+
+// closeListenerAndSleep closes the given listener (nil is tolerated) and then
+// sleeps briefly so the OS releases the port before a subsequent test rebinds it.
+func closeListenerAndSleep(l net.Listener) {
+ if l != nil {
+ l.Close()
+ time.Sleep(2 * time.Second)
+ }
+}
+
+// Test deploy of a transaction.
+// End-to-end check for a system chaincode: starts a gRPC chaincode-support
+// server on a private port, registers samplesyscc as the only system
+// chaincode, then performs a putval invoke followed by a getval query and
+// fails the test if either returns an error.
+func TestExecuteDeploySysChaincode(t *testing.T) {
+ var opts []grpc.ServerOption
+ grpcServer := grpc.NewServer(opts...)
+ viper.Set("peer.fileSystemPath", "/var/hyperledger/test/tmpdb")
+
+ //use a different address than what we usually use for "peer"
+ //we override the peerAddress set in chaincode_support.go
+ peerAddress := "0.0.0.0:21726"
+ lis, err := net.Listen("tcp", peerAddress)
+ if err != nil {
+ t.Fail()
+ t.Logf("Error starting peer listener %s", err)
+ return
+ }
+
+ getPeerEndpoint := func() (*pb.PeerEndpoint, error) {
+ return &pb.PeerEndpoint{ID: &pb.PeerID{Name: "testpeer"}, Address: peerAddress}, nil
+ }
+
+ ccStartupTimeout := time.Duration(5000) * time.Millisecond
+ pb.RegisterChaincodeSupportServer(grpcServer, chaincode.NewChaincodeSupport(chaincode.DefaultChain, getPeerEndpoint, false, ccStartupTimeout, nil))
+
+ go grpcServer.Serve(lis)
+
+ var ctxt = context.Background()
+
+ //set systemChaincodes to sample
+ // Replaces the package-level registry (importsysccs.go) for this test only.
+ systemChaincodes = []*api.SystemChaincode{
+ {
+ Enabled: true,
+ Name: "sample_syscc",
+ Path: "github.com/hyperledger/fabric/core/system_chaincode/samplesyscc",
+ InitArgs: []string{},
+ Chaincode: &samplesyscc.SampleSysCC{},
+ },
+ }
+
+ // System chaincode has to be enabled
+ viper.Set("chaincode.system", map[string]string{"sample_syscc": "true"})
+ RegisterSysCCs()
+
+ url := "github.com/hyperledger/fabric/core/system_chaincode/sample_syscc"
+ f := "putval"
+ args := []string{"greeting", "hey there"}
+
+ // Write a value through the system chaincode.
+ spec := &pb.ChaincodeSpec{Type: 1, ChaincodeID: &pb.ChaincodeID{Name: "sample_syscc", Path: url}, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+ _, _, _, err = invoke(ctxt, spec, pb.Transaction_CHAINCODE_INVOKE)
+ if err != nil {
+ closeListenerAndSleep(lis)
+ t.Fail()
+ t.Logf("Error invoking sample_syscc: %s", err)
+ return
+ }
+
+ // Read the value back via a query.
+ f = "getval"
+ args = []string{"greeting"}
+ spec = &pb.ChaincodeSpec{Type: 1, ChaincodeID: &pb.ChaincodeID{Name: "sample_syscc", Path: url}, CtorMsg: &pb.ChaincodeInput{Function: f, Args: args}}
+ _, _, _, err = invoke(ctxt, spec, pb.Transaction_CHAINCODE_QUERY)
+ if err != nil {
+ closeListenerAndSleep(lis)
+ t.Fail()
+ t.Logf("Error invoking sample_syscc: %s", err)
+ return
+ }
+
+ // Shut down the chaincode container/runtime before releasing the port.
+ cds := &pb.ChaincodeDeploymentSpec{ExecEnv: 1, ChaincodeSpec: &pb.ChaincodeSpec{Type: 1, ChaincodeID: &pb.ChaincodeID{Name: "sample_syscc", Path: url}, CtorMsg: &pb.ChaincodeInput{Args: args}}}
+
+ chaincode.GetChain(chaincode.DefaultChain).Stop(ctxt, cds)
+
+ closeListenerAndSleep(lis)
+}
+
+// TestMain loads the test configuration (viper config + logging) before
+// running the package's tests, and propagates the test exit code.
+func TestMain(m *testing.M) {
+ SetupTestConfig()
+ os.Exit(m.Run())
+}
diff --git a/core/util/utils.go b/core/util/utils.go
new file mode 100644
index 00000000000..9454efcbe2c
--- /dev/null
+++ b/core/util/utils.go
@@ -0,0 +1,141 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math/big"
+ "strings"
+ "time"
+
+ gp "google/protobuf"
+
+ "golang.org/x/crypto/sha3"
+)
+
+// alg pairs an ID-generating hash function with the decoder used to turn an
+// encoded payload string back into the bytes the hash function consumes.
+type alg struct {
+ hashFun func([]byte) string
+ decoder func(string) ([]byte, error)
+}
+
+// availableIDgenAlgs maps a user-selectable algorithm name to its
+// implementation; consumed by GenerateIDWithAlg below.
+var availableIDgenAlgs = map[string]alg{
+ "sha256base64": alg{GenerateUUIDfromTxSHAHash, base64.StdEncoding.DecodeString},
+}
+
+// ComputeCryptoHash should be used in openchain code so that we can change the actual algo used for crypto-hash at one place.
+// Currently it produces a 64-byte SHAKE-256 (SHA-3 family) digest of data.
+func ComputeCryptoHash(data []byte) (hash []byte) {
+ hash = make([]byte, 64)
+ sha3.ShakeSum256(hash, data)
+ return
+}
+
+// GenerateBytesUUID returns a UUID based on RFC 4122 returning the generated bytes.
+// The 16 bytes come from crypto/rand with the variant and version fields
+// patched in; panics if the system's random source cannot be read, since no
+// caller can meaningfully continue without entropy.
+func GenerateBytesUUID() []byte {
+ uuid := make([]byte, 16)
+ _, err := io.ReadFull(rand.Reader, uuid)
+ if err != nil {
+ panic(fmt.Sprintf("Error generating UUID: %s", err))
+ }
+
+ // variant bits; see section 4.1.1
+ uuid[8] = uuid[8]&^0xc0 | 0x80
+
+ // version 4 (pseudo-random); see section 4.1.3
+ uuid[6] = uuid[6]&^0xf0 | 0x40
+
+ return uuid
+}
+
+// GenerateIntUUID returns a UUID based on RFC 4122 returning a big.Int
+// built from the 16 random bytes interpreted as an unsigned big-endian value.
+func GenerateIntUUID() *big.Int {
+ uuid := GenerateBytesUUID()
+ z := big.NewInt(0)
+ return z.SetBytes(uuid)
+}
+
+// GenerateUUID returns a UUID based on RFC 4122, formatted as the canonical
+// 36-character 8-4-4-4-12 hex string.
+func GenerateUUID() string {
+ uuid := GenerateBytesUUID()
+ return uuidBytesToStr(uuid)
+}
+
+// CreateUtcTimestamp returns a google/protobuf/Timestamp in UTC.
+// Nanos carries only the sub-second remainder of the current time.
+func CreateUtcTimestamp() *gp.Timestamp {
+ now := time.Now().UTC()
+ secs := now.Unix()
+ nanos := int32(now.UnixNano() - (secs * 1000000000))
+ return &(gp.Timestamp{Seconds: secs, Nanos: nanos})
+}
+
+//GenerateHashFromSignature returns a hash of the combined parameters:
+//the crypto hash (see ComputeCryptoHash) of path + ctor + all args
+//concatenated with no separator.
+func GenerateHashFromSignature(path string, ctor string, args []string) []byte {
+ fargs := ctor
+ if args != nil {
+ for _, str := range args {
+ fargs = fargs + str
+ }
+ }
+ cbytes := []byte(path + fargs)
+
+ // NOTE(review): this defensive copy appears unnecessary — cbytes is a
+ // freshly allocated conversion and ComputeCryptoHash does not retain its
+ // input. Kept for byte-identical behavior.
+ b := make([]byte, len(cbytes))
+ copy(b, cbytes)
+ hash := ComputeCryptoHash(b)
+ return hash
+}
+
+// GenerateUUIDfromTxSHAHash generates SHA256 hash using Tx payload, and uses its first
+// 128 bits as a UUID, rendered in the canonical 8-4-4-4-12 hex format.
+// Deterministic: the same txData always yields the same UUID string.
+func GenerateUUIDfromTxSHAHash(txData []byte) string {
+ txHash := sha256.Sum256(txData)
+ return uuidBytesToStr(txHash[0:16])
+}
+
+// GenerateIDWithAlg generates an ID using a custom algorithm.
+// customIDgenAlg must be a key of availableIDgenAlgs (e.g. "sha256base64");
+// encodedPayload is decoded with that algorithm's decoder and fed to its
+// hash function. Returns ("", error) for an unknown algorithm name or a
+// payload that fails to decode.
+func GenerateIDWithAlg(customIDgenAlg string, encodedPayload string) (string, error) {
+ var alg = availableIDgenAlgs[customIDgenAlg]
+ if alg.hashFun != nil && alg.decoder != nil {
+ var payload, err = alg.decoder(encodedPayload)
+ if err != nil {
+ return "", err
+ }
+ return alg.hashFun(payload), nil
+ }
+ // golint: error strings should not be capitalized or end with punctuation.
+ return "", fmt.Errorf("wrong UUID generation algorithm was given")
+}
+
+// uuidBytesToStr formats 16 UUID bytes as the canonical hyphenated
+// 8-4-4-4-12 lowercase-hex string.
+func uuidBytesToStr(uuid []byte) string {
+ return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])
+}
+
+// FindMissingElements identifies the elements of the first slice that are not present in the second
+// The second slice is expected to be a subset of the first slice.
+// Order of the returned delta follows the order of 'all'. O(len(all)*len(some)).
+// NOTE(review): golint prefers v1 == v2 over strings.Compare; kept as-is to
+// avoid changing code lines.
+func FindMissingElements(all []string, some []string) (delta []string) {
+all:
+ // The label lets the inner match skip straight to the next candidate.
+ for _, v1 := range all {
+ for _, v2 := range some {
+ if strings.Compare(v1, v2) == 0 {
+ continue all
+ }
+ }
+ delta = append(delta, v1)
+ }
+ return
+}
diff --git a/core/util/utils_test.go b/core/util/utils_test.go
new file mode 100644
index 00000000000..97c0896124c
--- /dev/null
+++ b/core/util/utils_test.go
@@ -0,0 +1,85 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+ "time"
+)
+
+// TestComputeCryptoHash checks that the hash is deterministic for equal
+// inputs and differs for different inputs.
+func TestComputeCryptoHash(t *testing.T) {
+ if bytes.Compare(ComputeCryptoHash([]byte("foobar")), ComputeCryptoHash([]byte("foobar"))) != 0 {
+ t.Fatalf("Expected hashes to match, but they did not match")
+ }
+ if bytes.Compare(ComputeCryptoHash([]byte("foobar1")), ComputeCryptoHash([]byte("foobar2"))) == 0 {
+ t.Fatalf("Expected hashes to be different, but they match")
+ }
+}
+
+// TestUUIDGeneration checks the canonical 36-character format and that two
+// consecutively generated UUIDs differ.
+func TestUUIDGeneration(t *testing.T) {
+ uuid := GenerateUUID()
+ if len(uuid) != 36 {
+ t.Fatalf("UUID length is not correct. Expected = 36, Got = %d", len(uuid))
+ }
+ uuid2 := GenerateUUID()
+ if uuid == uuid2 {
+ t.Fatalf("Two UUIDs are equal. This should never occur")
+ }
+}
+
+// TestIntUUIDGeneration checks that two consecutively generated big.Int
+// UUIDs have different values.
+func TestIntUUIDGeneration(t *testing.T) {
+ uuid := GenerateIntUUID()
+
+ uuid2 := GenerateIntUUID()
+ // Bug fix: GenerateIntUUID returns *big.Int, so the original 'uuid == uuid2'
+ // compared pointers — always unequal for distinct allocations, making the
+ // test vacuous. Compare the values with Cmp instead.
+ if uuid.Cmp(uuid2) == 0 {
+ t.Fatalf("Two UUIDs are equal. This should never occur")
+ }
+}
+// TestTimestamp logs ten UTC timestamps 200ms apart; it is a smoke test for
+// CreateUtcTimestamp (output is inspected manually, nothing is asserted).
+func TestTimestamp(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ t.Logf("timestamp now: %v", CreateUtcTimestamp())
+ time.Sleep(200 * time.Millisecond)
+ }
+}
+
+// TestGenerateHashFromSignature checks determinism for identical
+// (path, ctor, args) triples and divergence for different ones.
+func TestGenerateHashFromSignature(t *testing.T) {
+ if bytes.Compare(GenerateHashFromSignature("aPath", "aCtor", []string{"1", "2"}),
+ GenerateHashFromSignature("aPath", "aCtor", []string{"1", "2"})) != 0 {
+ t.Fatalf("Expected hashes to match, but they did not match")
+ }
+ if bytes.Compare(GenerateHashFromSignature("aPath", "aCtor", []string{"1", "2"}),
+ GenerateHashFromSignature("bPath", "bCtor", []string{"3", "4"})) == 0 {
+ t.Fatalf("Expected hashes to be different, but they match")
+ }
+}
+
+// TestFindMissingElements checks that the delta of {a,b,c,d} minus {b,c} is
+// exactly {a,d}, in the order of the first slice.
+func TestFindMissingElements(t *testing.T) {
+ all := []string{"a", "b", "c", "d"}
+ some := []string{"b", "c"}
+ expectedDelta := []string{"a", "d"}
+ actualDelta := FindMissingElements(all, some)
+ if len(expectedDelta) != len(actualDelta) {
+ t.Fatalf("Got %v, expected %v", actualDelta, expectedDelta)
+ }
+ // Element-wise comparison preserves the ordering requirement.
+ for i := range expectedDelta {
+ if strings.Compare(expectedDelta[i], actualDelta[i]) != 0 {
+ t.Fatalf("Got %v, expected %v", actualDelta, expectedDelta)
+ }
+ }
+}
diff --git a/devenv/README.md b/devenv/README.md
new file mode 100644
index 00000000000..8a71543b039
--- /dev/null
+++ b/devenv/README.md
@@ -0,0 +1,26 @@
+## Development Environment
+
+If you're looking for instructions on how to setup the Hyperledger Fabric development environment, see
+the [development environment setup readme](https://github.com/hyperledger/fabric/blob/master/docs/dev-setup/devenv.md).
+
+This folder contains the files which are used for bootstrapping the Hyperledger Fabric development environment.
+
+Below is a deployment diagram of the current development environment. Note that version numbers may be out of date.
+
+
+# Storage Backends
+
+You may optionally choose a docker storage backend other than the default.
+
+For a comparison of the assorted storage backends refer to [select a storage driver](https://docs.docker.com/engine/userguide/storagedriver/selectadriver/)
+
+Presently, the default is set to AUFS, but this may change in the future.
+
+To select a different storage backend (btrfs in this example), simply execute
+
+```
+vagrant destroy
+DOCKER_STORAGE_BACKEND=btrfs vagrant up
+```
+
+Currently supported backends are btrfs and aufs, with more to be added in the future.
diff --git a/devenv/Vagrantfile b/devenv/Vagrantfile
new file mode 100644
index 00000000000..b1b099139f6
--- /dev/null
+++ b/devenv/Vagrantfile
@@ -0,0 +1,88 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# This vagrantfile creates a VM with the development environment
+# configured and ready to go.
+#
+# The setup script (env var $script) in this file installs docker.
+# This is not in the setup.sh file because the docker install needs
+# to be secure when running on a real linux machine.
+# The docker environment that is installed by this script is not secure,
+# it depends on the host being secure.
+#
+# At the end of the setup script in this file, a call is made
+# to run setup.sh to create the developer environment.
+
+# This is the mount point for the sync_folders of the source
+SRCMOUNT = "/hyperledger"
+LOCALDEV = "/local-dev"
+
+$script = <