diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index cb2b98af66..0cc8fe0f5a 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -30,24 +30,19 @@ RUN gpg --keyserver keyserver.ubuntu.com --recv-keys 9692C00E657DDE61 && \ # bionic key RUN gpg --keyserver keyserver.ubuntu.com --recv-keys 3B4FE6ACC0B21F32 && \ gpg --export 3B4FE6ACC0B21F32 > /etc/apt/keyrings/ubuntu-bionic.gpg -# sovrin key -RUN gpg --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88 && \ - gpg --export CE7709D068DB5E88 > /etc/apt/keyrings/sovrin.gpg # ======================================================================================================== # ToDo: # - Eliminate dependency on obsolete bionic repositories. # Plenum -# - https://github.com/hyperledger/indy-plenum/issues/1546 -# - Needed to pick up rocksdb=5.8.8 -RUN echo "deb [signed-by=/etc/apt/keyrings/hyperledger.gpg] https://hyperledger.jfrog.io/artifactory/indy jammy dev rc" > /etc/apt/sources.list.d/hyperledger.list - # todo: Will be removed when libindy will have been replace by indy-vdr, askar, and indy-credx RUN echo "deb [signed-by=/etc/apt/keyrings/ubuntu-bionic.gpg] http://security.ubuntu.com/ubuntu bionic-security main" > /etc/apt/sources.list.d/ubuntu-bionic.list -RUN echo "deb [signed-by=/etc/apt/keyrings/sovrin.gpg] https://repo.sovrin.org/deb bionic master" > /etc/apt/sources.list.d/sovrin.list -RUN echo "deb [signed-by=/etc/apt/keyrings/sovrin.gpg] https://repo.sovrin.org/sdk/deb bionic master" >> /etc/apt/sources.list.d/sovrin.list +RUN echo "deb [signed-by=/etc/apt/keyrings/hyperledger.gpg] https://hyperledger.jfrog.io/artifactory/indy bionic master" > /etc/apt/sources.list.d/hyperledger.list +# - https://github.com/hyperledger/indy-plenum/issues/1546 +# - Needed to pick up rocksdb=5.8.8 +RUN echo "deb [signed-by=/etc/apt/keyrings/hyperledger.gpg] https://hyperledger.jfrog.io/artifactory/indy jammy dev rc" >> /etc/apt/sources.list.d/hyperledger.list RUN apt-get update -y && apt-get install -y \ # Python @@ -79,10 +74,11 @@ RUN apt-get update -y && apt-get install -y \ # - This deb is a special build using the code here: # - https://github.com/ioflo/ioflo/commit/45bcddbf680d22af84469406a04286ff1c79043a # - This line can be removed once the release containing these changes is available on PyPi - python3-ioflo \ + #python3-ioflo \ # Need to move libursa.so to parent dir && mv /usr/lib/ursa/* /usr/lib && rm -rf /usr/lib/ursa + RUN pip3 install -U \ # Required by setup.py setuptools==50.3.2 \ @@ -91,6 +87,7 @@ RUN pip3 install -U \ pep8==1.7.1 \ pep8-naming==0.6.1 \ flake8==3.8.4 \ + ioflo==2.0.3 \ Cython==0.29.36 # install fpm diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 596285d4dc..06c1a39a6a 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -10,15 +10,22 @@ }, // Set *default* container specific settings.json values on container create. - "settings": {}, + // Add the IDs of extensions you want installed when the container is created. - "extensions": [ - "mhutchie.git-graph", - "eamodio.gitlens", - "ms-python.python" - ], + "customizations": { + "vscode": { + "extensions": [ + "mhutchie.git-graph", + "eamodio.gitlens", + "ms-python.python" + ], + "settings": { + "python.analysis.extraPaths": ["/home/vscode/.local/lib/python3.10/site-packages"] + } + } + }, // Use 'forwardPorts' to make a list of ports inside the container available locally. 
// "forwardPorts": [], diff --git a/.github/workflows/build/Dockerfile.ubuntu-2004 b/.github/workflows/build/Dockerfile.ubuntu-2004 index d7ac2b296b..8eb37d52ef 100644 --- a/.github/workflows/build/Dockerfile.ubuntu-2004 +++ b/.github/workflows/build/Dockerfile.ubuntu-2004 @@ -17,23 +17,19 @@ RUN apt-get update -y && apt-get install -y \ # Update repository signing keys # -------------------------------------------------------------------------------------------------------- # Hyperledger -RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 9692C00E657DDE61 && \ - # Sovrin - apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88 +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 9692C00E657DDE61 && # ======================================================================================================== # Plenum # - https://github.com/hyperledger/indy-plenum/issues/1546 # - Needed to pick up rocksdb=5.8.8 -RUN echo "deb https://hyperledger.jfrog.io/artifactory/indy focal dev" >> /etc/apt/sources.list && \ - echo "deb http://security.ubuntu.com/ubuntu bionic-security main" >> /etc/apt/sources.list && \ - echo "deb https://repo.sovrin.org/deb bionic master" >> /etc/apt/sources.list && \ - echo "deb https://repo.sovrin.org/sdk/deb bionic master" >> /etc/apt/sources.list - +RUN echo "deb http://security.ubuntu.com/ubuntu bionic-security main" >> /etc/apt/sources.list && \ + echo "deb https://hyperledger.jfrog.io/artifactory/indy bionic master" >> /etc/apt/sources.list && \ + echo "deb https://hyperledger.jfrog.io/artifactory/indy focal dev" >> /etc/apt/sources.list RUN apt-get update -y && apt-get install -y \ # Python python3-pip \ rubygems && \ gem install --no-document dotenv:2.8.1 fpm:1.14.2 && \ - pip3 install Cython==0.29.36 \ No newline at end of file + pip3 install Cython==0.29.36 diff --git a/.github/workflows/build/Dockerfile.ubuntu-2204 b/.github/workflows/build/Dockerfile.ubuntu-2204 index c84d30a94f..c62f7e9de8 100644 --- a/.github/workflows/build/Dockerfile.ubuntu-2204 +++ b/.github/workflows/build/Dockerfile.ubuntu-2204 @@ -25,27 +25,22 @@ RUN gpg --keyserver keyserver.ubuntu.com --recv-keys 9692C00E657DDE61 && \ # bionic key RUN gpg --keyserver keyserver.ubuntu.com --recv-keys 3B4FE6ACC0B21F32 && \ gpg --export 3B4FE6ACC0B21F32 > /etc/apt/keyrings/ubuntu-bionic.gpg -# sovrin key -RUN gpg --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88 && \ - gpg --export CE7709D068DB5E88 > /etc/apt/keyrings/sovrin.gpg # ======================================================================================================== # ToDo: # - Eliminate dependency on obsolete bionic repositories. 
# Plenum -# - https://github.com/hyperledger/indy-plenum/issues/1546 -# - Needed to pick up rocksdb=5.8.8 -RUN echo "deb [signed-by=/etc/apt/keyrings/hyperledger.gpg] https://hyperledger.jfrog.io/artifactory/indy jammy dev rc" > /etc/apt/sources.list.d/hyperledger.list - # todo: Will be removed when libindy will have been replace by indy-vdr, askar, and indy-credx RUN echo "deb [signed-by=/etc/apt/keyrings/ubuntu-bionic.gpg] http://security.ubuntu.com/ubuntu bionic-security main" > /etc/apt/sources.list.d/ubuntu-bionic.list -RUN echo "deb [signed-by=/etc/apt/keyrings/sovrin.gpg] https://repo.sovrin.org/deb bionic master" > /etc/apt/sources.list.d/sovrin.list -RUN echo "deb [signed-by=/etc/apt/keyrings/sovrin.gpg] https://repo.sovrin.org/sdk/deb bionic master" >> /etc/apt/sources.list.d/sovrin.list +RUN echo "deb [signed-by=/etc/apt/keyrings/hyperledger.gpg] https://hyperledger.jfrog.io/artifactory/indy bionic master" > /etc/apt/sources.list.d/hyperledger.list + +# - https://github.com/hyperledger/indy-plenum/issues/1546 +# - Needed to pick up rocksdb=5.8.8 +RUN echo "deb [signed-by=/etc/apt/keyrings/hyperledger.gpg] https://hyperledger.jfrog.io/artifactory/indy jammy dev rc" >> /etc/apt/sources.list.d/hyperledger.list RUN apt update -y && apt install -y rubygems python3-pip && apt-get -y autoremove && rm -rf /var/lib/apt/lists/* # install fpm RUN gem install --no-document dotenv:2.8.1 fpm:1.15.0 && \ pip3 install Cython==0.29.36 - diff --git a/.gitpod.Dockerfile b/.gitpod.Dockerfile index 878f047cc7..0ca63c2169 100644 --- a/.gitpod.Dockerfile +++ b/.gitpod.Dockerfile @@ -18,18 +18,15 @@ RUN sudo apt-get update -y && sudo apt-get install -y \ # Update repository signing keys # -------------------------------------------------------------------------------------------------------- # Hyperledger -RUN sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 9692C00E657DDE61 && \ - # Sovrin - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88 +RUN sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 9692C00E657DDE61 # ======================================================================================================== # Plenum # - https://github.com/hyperledger/indy-plenum/issues/1546 # - Needed to pick up rocksdb=5.8.8 -RUN sudo add-apt-repository 'deb https://hyperledger.jfrog.io/artifactory/indy focal dev' && \ - sudo add-apt-repository 'deb http://security.ubuntu.com/ubuntu bionic-security main' && \ - sudo add-apt-repository 'deb https://repo.sovrin.org/deb bionic master' && \ - sudo add-apt-repository 'deb https://repo.sovrin.org/sdk/deb bionic master' +RUN echo "deb http://security.ubuntu.com/ubuntu bionic-security main" >> /etc/apt/sources.list && \ + echo "deb https://hyperledger.jfrog.io/artifactory/indy bionic master" >> /etc/apt/sources.list && \ + echo "deb https://hyperledger.jfrog.io/artifactory/indy focal dev" >> /etc/apt/sources.list RUN sudo apt-get update -y && sudo apt-get install -y \ # Python @@ -69,4 +66,4 @@ RUN pip3 install -U \ flake8==3.8.4 # install fpm -RUN sudo gem install --no-document rake dotenv:2.8.1 fpm:1.14.2 \ No newline at end of file +RUN sudo gem install --no-document rake dotenv:2.8.1 fpm:1.14.2 diff --git a/build-scripts/ubuntu-2204/build-3rd-parties.sh b/build-scripts/ubuntu-2204/build-3rd-parties.sh index c779800e36..2244393f6a 100755 --- a/build-scripts/ubuntu-2204/build-3rd-parties.sh +++ b/build-scripts/ubuntu-2204/build-3rd-parties.sh @@ -185,4 +185,8 @@ build_from_pypi_wheel six build_from_pypi_wheel 
sortedcontainers 2.1.0 build_from_pypi_wheel ujson 1.33 +build_from_pypi_wheel indy-vdr 0.4.2 +build_from_pypi_wheel aries-askar 0.4.3 +build_from_pypi_wheel indy-credx 1.1.1 + rm -vf ${OUTPUT_PATH}/python3-setuptools*.deb diff --git a/dev-setup/ubuntu/ubuntu-2004/SetupVMTest.txt b/dev-setup/ubuntu/ubuntu-2004/SetupVMTest.txt index e09b761922..bbccbfd4e9 100644 --- a/dev-setup/ubuntu/ubuntu-2004/SetupVMTest.txt +++ b/dev-setup/ubuntu/ubuntu-2004/SetupVMTest.txt @@ -3,9 +3,8 @@ ##Pre-Install sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88 || sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys CE7709D068DB5E88 - sudo echo "deb https://repo.sovrin.org/deb bionic master" >> /etc/apt/sources.list - sudo echo "deb https://repo.sovrin.org/deb bionic stable" >> /etc/apt/sources.list + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 9692C00E657DDE61 || sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 9692C00E657DDE61 + sudo echo "deb https://hyperledger.jfrog.io/artifactory/indy bionic master" >> /etc/apt/sources.list sudo echo "deb http://security.ubuntu.com/ubuntu bionic-security main" >> /etc/apt/sources.list sudo apt-get update && sudo apt-get install -y \ @@ -72,7 +71,9 @@ python-dateutil==2.6.1 \ python-rocksdb==0.7.0 \ python-ursa==0.1.1 \ - python3-indy==1.13.0 \ + indy_vdr==0.4.0.dev5 \ + aries-askar==0.2.7 \ + indy-credx==0.3.1 \ pyzmq==22.3.0 --install-option=--zmq=bundled \ rlp==0.6.0 \ semver==2.13.0 \ diff --git a/plenum/test/audit_ledger/test_audit_ledger_multiple_ledgers_in_one_batch.py b/plenum/test/audit_ledger/test_audit_ledger_multiple_ledgers_in_one_batch.py index bd07218e7e..92e3ebed91 100644 --- a/plenum/test/audit_ledger/test_audit_ledger_multiple_ledgers_in_one_batch.py +++ b/plenum/test/audit_ledger/test_audit_ledger_multiple_ledgers_in_one_batch.py @@ -1,9 +1,13 @@ +import base58 + from plenum.common.constants import TXN_TYPE, TARGET_NYM, AUDIT_TXN_LEDGER_ROOT, AUDIT_TXN_STATE_ROOT, TXN_PAYLOAD, \ - TXN_PAYLOAD_DATA, TXN_METADATA, TXN_METADATA_SEQ_NO, TXN_AUTHOR_AGREEMENT_AML, AML_VERSION, ROLE, DATA, ALIAS + TXN_PAYLOAD_DATA, TXN_METADATA, TXN_METADATA_SEQ_NO, TXN_AUTHOR_AGREEMENT_AML, AML_VERSION, ROLE, DATA, ALIAS, CURRENT_PROTOCOL_VERSION from plenum.common.ledger import Ledger from plenum.common.transactions import PlenumTransactions from plenum.server.batch_handlers.three_pc_batch import ThreePcBatch -from plenum.test.helper import sdk_gen_request +from plenum.test.helper import vdr_gen_request, gen_request_plenum +from plenum.test.wallet_helper import vdr_create_and_store_did +from indy_vdr.ledger import build_nym_request, build_pool_config_request def test_audit_ledger_multiple_ledgers_in_one_batch(txnPoolNodeSet): @@ -12,13 +16,16 @@ def test_audit_ledger_multiple_ledgers_in_one_batch(txnPoolNodeSet): audit_batch_handler = node.write_manager.audit_b_handler op = { TXN_TYPE: PlenumTransactions.NYM.value, - TARGET_NYM: "000000000000000000000000Trustee4" + TARGET_NYM: "000000000000000000000000Trustee4" # This is a seed not a did } - nym_req = sdk_gen_request(op, signatures={"sig1": "111"}) - node.write_manager.apply_request(nym_req, 10000) + #_, did = sdk_wallet_client + #target_did, verkey = looper.loop.run_until_complete(create_and_store_did(sdk_wallet_handle, seed=op[TARGET_NYM])) + #nym_req = build_nym_request(did, target_did, verkey, version=CURRENT_PROTOCOL_VERSION) + nym_req_plenum = 
gen_request_plenum(op, signatures={"sig1": "111"}) + node.write_manager.apply_request(nym_req_plenum, 10000) op2 = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_AML, AML_VERSION: "version1"} - pool_config_req = sdk_gen_request(op2, signatures={"sig1": "111"}) + pool_config_req = gen_request_plenum(op2, signatures={"sig1": "111"}) node.write_manager.apply_request(pool_config_req, 10000) domain_root_hash = Ledger.hashToStr(node.domainLedger.uncommittedRootHash) @@ -41,11 +48,11 @@ def test_audit_ledger_multiple_ledgers_in_one_batch(txnPoolNodeSet): TXN_TYPE: PlenumTransactions.NYM.value, TARGET_NYM: "000000000000000000000000Trustee5" } - nym_req = sdk_gen_request(op, signatures={"sig1": "111"}) + nym_req = gen_request_plenum(op, signatures={"sig1": "111"}) node.write_manager.apply_request(nym_req, 10000) op2 = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_AML, AML_VERSION: "version2"} - pool_config_req = sdk_gen_request(op2, signatures={"sig1": "111"}) + pool_config_req = gen_request_plenum(op2, signatures={"sig1": "111"}) node.write_manager.apply_request(pool_config_req, 10000) # Checking second batch created @@ -74,11 +81,11 @@ def test_multiple_ledgers_in_second_batch_apply_first_time(txnPoolNodeSet): TARGET_NYM: "000000000000000000000000Trustee4", ROLE: None } - nym_req = sdk_gen_request(op, signatures={"sig1": "111"}) + nym_req = gen_request_plenum(op, signatures={"sig1": "111"}) node.write_manager.apply_request(nym_req, 10000) op2 = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_AML, AML_VERSION: "version2"} - pool_config_req = sdk_gen_request(op2, signatures={"sig1": "111"}) + pool_config_req = gen_request_plenum(op2, signatures={"sig1": "111"}) node.write_manager.apply_request(pool_config_req, 10000) domain_root_hash = Ledger.hashToStr(node.domainLedger.uncommittedRootHash) @@ -94,12 +101,12 @@ def test_multiple_ledgers_in_second_batch_apply_first_time(txnPoolNodeSet): TARGET_NYM: "000000000000000000000000Trustee1", DATA: {ALIAS: "Node100"} } - node_req = sdk_gen_request(op2, signatures={"sig1": "111"}) + node_req = gen_request_plenum(op2, signatures={"sig1": "111"}) node.write_manager.apply_request(node_req, 10000) op2 = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_AML, AML_VERSION: "version2"} - pool_config_req = sdk_gen_request(op2, signatures={"sig1": "111"}) + pool_config_req = gen_request_plenum(op2, signatures={"sig1": "111"}) node.write_manager.apply_request(pool_config_req, 10000) pool_root_hash = Ledger.hashToStr(node.poolLedger.uncommittedRootHash) diff --git a/plenum/test/audit_ledger/test_audit_ledger_ordering.py b/plenum/test/audit_ledger/test_audit_ledger_ordering.py index 70c0897314..085b12c08b 100644 --- a/plenum/test/audit_ledger/test_audit_ledger_ordering.py +++ b/plenum/test/audit_ledger/test_audit_ledger_ordering.py @@ -1,11 +1,11 @@ from plenum.common.constants import DOMAIN_LEDGER_ID, POOL_LEDGER_ID from plenum.test.audit_ledger.helper import check_audit_ledger_updated, check_audit_txn from plenum.test.bls.helper import sdk_change_bls_key -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check def test_audit_ledger_updated_after_ordering(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, sdk_wallet_stewards, + vdr_pool_handle, vdr_wallet_client, vdr_wallet_stewards, initial_domain_size, initial_pool_size, initial_config_size, view_no, pp_seq_no, initial_seq_no): @@ -16,7 +16,7 @@ def test_audit_ledger_updated_after_ordering(looper, txnPoolNodeSet, # 1st domain txn audit_size_initial = [node.auditLedger.size for node in txnPoolNodeSet] - 
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet, audit_txns_added=1) for node in txnPoolNodeSet: @@ -46,7 +46,7 @@ def test_audit_ledger_updated_after_ordering(looper, txnPoolNodeSet, ) # 2d domain txn - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet, audit_txns_added=2) for node in txnPoolNodeSet: @@ -76,8 +76,8 @@ def test_audit_ledger_updated_after_ordering(looper, txnPoolNodeSet, # 1st pool txn sdk_change_bls_key(looper, txnPoolNodeSet, txnPoolNodeSet[3], - sdk_pool_handle, - sdk_wallet_stewards[3], + vdr_pool_handle, + vdr_wallet_stewards[3], check_functional=False) check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet, audit_txns_added=3) @@ -108,8 +108,8 @@ def test_audit_ledger_updated_after_ordering(looper, txnPoolNodeSet, # 2d pool txn sdk_change_bls_key(looper, txnPoolNodeSet, txnPoolNodeSet[3], - sdk_pool_handle, - sdk_wallet_stewards[3], + vdr_pool_handle, + vdr_wallet_stewards[3], check_functional=False) check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet, audit_txns_added=4) @@ -138,7 +138,7 @@ def test_audit_ledger_updated_after_ordering(looper, txnPoolNodeSet, ) # one more domain txn - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet, audit_txns_added=5) for node in txnPoolNodeSet: diff --git a/plenum/test/audit_ledger/test_audit_ledger_view_change.py b/plenum/test/audit_ledger/test_audit_ledger_view_change.py index 57a7702423..1ff022b212 100644 --- a/plenum/test/audit_ledger/test_audit_ledger_view_change.py +++ b/plenum/test/audit_ledger/test_audit_ledger_view_change.py @@ -6,8 +6,8 @@ from plenum.common.constants import DOMAIN_LEDGER_ID, STEWARD_STRING from plenum.test.audit_ledger.helper import check_audit_ledger_updated, check_audit_txn -from plenum.test.helper import sdk_send_random_and_check, assertExp, get_pp_seq_no -from plenum.test.pool_transactions.helper import sdk_add_new_nym, sdk_add_new_node +from plenum.test.helper import vdr_send_random_and_check, assertExp, get_pp_seq_no +from plenum.test.pool_transactions.helper import vdr_add_new_nym, vdr_add_new_node from plenum.test.test_node import checkNodesConnected, ensureElectionsDone from stp_core.loop.eventually import eventually @@ -16,7 +16,7 @@ @pytest.mark.skip(reason="INDY-2276. 
Issue with adding node that will change f value") def test_audit_ledger_view_change(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, sdk_wallet_steward, + vdr_pool_handle, vdr_wallet_client, vdr_wallet_steward, initial_domain_size, initial_pool_size, initial_config_size, tdir, tconf, @@ -36,9 +36,9 @@ def test_audit_ledger_view_change(looper, txnPoolNodeSet, other_nodes = txnPoolNodeSet[:-1] slow_node = txnPoolNodeSet[-1] # Add a new steward for creating a new node - new_steward_wallet_handle = sdk_add_new_nym(looper, - sdk_pool_handle, - sdk_wallet_steward, + new_steward_wallet_handle = vdr_add_new_nym(looper, + vdr_pool_handle, + vdr_wallet_steward, alias="newSteward", role=STEWARD_STRING) @@ -50,8 +50,8 @@ def test_audit_ledger_view_change(looper, txnPoolNodeSet, with delay_rules([n.nodeIbStasher for n in txnPoolNodeSet], icDelay()): # Send NODE txn fo 7th node - new_node = sdk_add_new_node(looper, - sdk_pool_handle, + new_node = vdr_add_new_node(looper, + vdr_pool_handle, new_steward_wallet_handle, "Theta", tdir, @@ -61,8 +61,8 @@ def test_audit_ledger_view_change(looper, txnPoolNodeSet, txnPoolNodeSet.append(new_node) looper.run(checkNodesConnected(other_nodes + [new_node])) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) check_audit_ledger_updated(audit_size_initial, [slow_node], audit_txns_added=0) diff --git a/plenum/test/audit_ledger/test_audit_ledger_with_node_reg_feature_added.py b/plenum/test/audit_ledger/test_audit_ledger_with_node_reg_feature_added.py index ea4887ba1e..139f29b016 100644 --- a/plenum/test/audit_ledger/test_audit_ledger_with_node_reg_feature_added.py +++ b/plenum/test/audit_ledger/test_audit_ledger_with_node_reg_feature_added.py @@ -1,6 +1,6 @@ from plenum.common.constants import AUDIT_TXN_NODE_REG from plenum.common.txn_util import get_payload_data -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check def patch_node_reg_in_audit(node, monkeypatch): @@ -13,13 +13,13 @@ def patch_node_reg_in_audit(node, monkeypatch): def test_audit_ledger_with_node_reg_feature_added(looper, tconf, txnPoolNodeSet, monkeypatch, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): # 1. patch audit ledger to not store node reg at all for n in txnPoolNodeSet: patch_node_reg_in_audit(n, monkeypatch) # 2. order a txn - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) # 3. check that node reg is not present in the latest audit txn for node in txnPoolNodeSet: @@ -29,7 +29,7 @@ def test_audit_ledger_with_node_reg_feature_added(looper, tconf, txnPoolNodeSet, monkeypatch.undo() # 5. order a txn - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) # 6. 
check that node reg is present in the latest audit txn for node in txnPoolNodeSet: diff --git a/plenum/test/audit_ledger/test_audit_multiple_uncommitted_node_regs.py b/plenum/test/audit_ledger/test_audit_multiple_uncommitted_node_regs.py index 3c0e00dfd3..35b6ae1860 100644 --- a/plenum/test/audit_ledger/test_audit_multiple_uncommitted_node_regs.py +++ b/plenum/test/audit_ledger/test_audit_multiple_uncommitted_node_regs.py @@ -1,15 +1,15 @@ from plenum.test.delayers import cDelay, icDelay from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.stasher import delay_rules, start_delaying from plenum.test.view_change.helper import add_new_node def test_audit_multiple_uncommitted_node_regs(looper, tdir, tconf, allPluginsPath, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_steward): ''' - Delay COMMITS on 1 Node - Add 2 more nodes (so that the slow node hs multiple uncommitted node txns) @@ -26,8 +26,8 @@ def test_audit_multiple_uncommitted_node_regs(looper, tdir, tconf, allPluginsPat # Add Node5 new_node = add_new_node(looper, fast_nodes, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath, @@ -39,8 +39,8 @@ def test_audit_multiple_uncommitted_node_regs(looper, tdir, tconf, allPluginsPat # Add Node6 new_node = add_new_node(looper, fast_nodes, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath, @@ -50,4 +50,4 @@ def test_audit_multiple_uncommitted_node_regs(looper, tdir, tconf, allPluginsPat start_delaying(new_node.nodeIbStasher, icDelay()) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=20) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/audit_ledger/test_demote_backup_primary.py b/plenum/test/audit_ledger/test_demote_backup_primary.py index 6bc6467127..03b3e32fc9 100644 --- a/plenum/test/audit_ledger/test_demote_backup_primary.py +++ b/plenum/test/audit_ledger/test_demote_backup_primary.py @@ -1,6 +1,6 @@ import pytest -from plenum.test.helper import sdk_send_random_and_check, waitForViewChange, view_change_timeout +from plenum.test.helper import vdr_send_random_and_check, waitForViewChange, view_change_timeout from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.pool_transactions.helper import demote_node, disconnect_node_and_ensure_disconnected from plenum.test.test_node import ensureElectionsDone, checkNodesConnected @@ -15,13 +15,13 @@ def tconf(tconf): yield tconf -def test_demote_backup_primary(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_stewards, tdir, tconf, allPluginsPath): +def test_demote_backup_primary(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_stewards, tdir, tconf, allPluginsPath): assert len(txnPoolNodeSet) == 6 view_no = txnPoolNodeSet[-1].viewNo - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_stewards[0], 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_stewards[0], 1) node_to_restart = txnPoolNodeSet[-1] node_to_demote = steward_for_demote_node = demote_node_index = None @@ -29,13 +29,13 @@ def 
test_demote_backup_primary(looper, txnPoolNodeSet, sdk_pool_handle, for i, n in enumerate(txnPoolNodeSet): if n.name == txnPoolNodeSet[0].primaries[1]: node_to_demote = n - steward_for_demote_node = sdk_wallet_stewards[i] + steward_for_demote_node = vdr_wallet_stewards[i] demote_node_index = i break assert node_to_demote - demote_node(looper, steward_for_demote_node, sdk_pool_handle, + demote_node(looper, steward_for_demote_node, vdr_pool_handle, node_to_demote) del txnPoolNodeSet[demote_node_index] @@ -54,6 +54,6 @@ def test_demote_backup_primary(looper, txnPoolNodeSet, sdk_pool_handle, looper.run(checkNodesConnected(txnPoolNodeSet)) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_stewards[0], 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_stewards[0], 1) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=20) diff --git a/plenum/test/audit_ledger/test_demote_backup_primary_without_view_change.py b/plenum/test/audit_ledger/test_demote_backup_primary_without_view_change.py index df79d84039..321781d4de 100644 --- a/plenum/test/audit_ledger/test_demote_backup_primary_without_view_change.py +++ b/plenum/test/audit_ledger/test_demote_backup_primary_without_view_change.py @@ -1,5 +1,5 @@ from plenum.test.delayers import icDelay -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.pool_transactions.helper import demote_node, disconnect_node_and_ensure_disconnected from plenum.test.stasher import delay_rules @@ -10,12 +10,12 @@ whitelist = ['Audit ledger has inconsistent names of primaries', ] -def test_demote_backup_primary_without_view_change(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_stewards, tdir, tconf, allPluginsPath): +def test_demote_backup_primary_without_view_change(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_stewards, tdir, tconf, allPluginsPath): assert len(txnPoolNodeSet) > 4 - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_stewards[0], 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_stewards[0], 1) lagging_instance = 1 node_to_restart = txnPoolNodeSet[-1] @@ -24,7 +24,7 @@ def test_demote_backup_primary_without_view_change(looper, txnPoolNodeSet, sdk_p for i, n in enumerate(txnPoolNodeSet): if n.name == txnPoolNodeSet[0].primaries[lagging_instance]: node_to_demote = n - steward_for_demote_node = sdk_wallet_stewards[i] + steward_for_demote_node = vdr_wallet_stewards[i] demote_node_index = i break assert node_to_demote @@ -33,7 +33,7 @@ def test_demote_backup_primary_without_view_change(looper, txnPoolNodeSet, sdk_p for n in txnPoolNodeSet if n != txnPoolNodeSet[demote_node_index]], icDelay()): - demote_node(looper, steward_for_demote_node, sdk_pool_handle, + demote_node(looper, steward_for_demote_node, vdr_pool_handle, node_to_demote) del txnPoolNodeSet[demote_node_index] @@ -47,8 +47,8 @@ def test_demote_backup_primary_without_view_change(looper, txnPoolNodeSet, sdk_p looper.run(checkNodesConnected(txnPoolNodeSet)) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_stewards[0], 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_stewards[0], 1) 
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) for instance_id, r in node_to_restart.replicas.items(): if instance_id == lagging_instance: diff --git a/plenum/test/audit_ledger/test_do_not_select_primaries_from_audit_on_startup.py b/plenum/test/audit_ledger/test_do_not_select_primaries_from_audit_on_startup.py index 00aafe257d..8d49310f59 100644 --- a/plenum/test/audit_ledger/test_do_not_select_primaries_from_audit_on_startup.py +++ b/plenum/test/audit_ledger/test_do_not_select_primaries_from_audit_on_startup.py @@ -1,7 +1,7 @@ from plenum.common.constants import AUDIT_TXN_PRIMARIES -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.restart.helper import restart_nodes from plenum.test.test_node import ensureElectionsDone @@ -18,13 +18,13 @@ def patch_primaries_in_audit(node, monkeypatch): def test_first_audit_catchup_during_ordering(monkeypatch, looper, tconf, tdir, allPluginsPath, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): # 1. patch primaries in audit ledger for n in txnPoolNodeSet: patch_primaries_in_audit(n, monkeypatch) # 2. order a txn - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) # 3. restart Nodes 3 and 4 restart_nodes(looper, txnPoolNodeSet, txnPoolNodeSet[2:], tconf, tdir, allPluginsPath, start_one_by_one=False) @@ -34,4 +34,4 @@ def test_first_audit_catchup_during_ordering(monkeypatch, # 5. 
make sure that all node have equal Priamries and can order ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=20) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/audit_ledger/test_first_audit_catchup_during_ordering.py b/plenum/test/audit_ledger/test_first_audit_catchup_during_ordering.py index a6c9c46324..8e0b272704 100644 --- a/plenum/test/audit_ledger/test_first_audit_catchup_during_ordering.py +++ b/plenum/test/audit_ledger/test_first_audit_catchup_during_ordering.py @@ -5,7 +5,7 @@ from plenum.common.messages.node_messages import MessageReq, CatchupReq from plenum.server.catchup.node_leecher_service import NodeLeecherService from plenum.test.delayers import ppDelay, pDelay, cDelay, DEFAULT_DELAY -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_request.test_timestamp.helper import get_timestamp_suspicion_count from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules, start_delaying, stop_delaying_and_process @@ -26,7 +26,7 @@ def delay(msg): return delay -def test_first_audit_catchup_during_ordering(tdir, tconf, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): +def test_first_audit_catchup_during_ordering(tdir, tconf, looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): lagging_node = txnPoolNodeSet[-1] other_nodes = txnPoolNodeSet[:-1] other_stashers = [node.nodeIbStasher for node in other_nodes] @@ -48,8 +48,8 @@ def check_lagging_node_is_not_syncing_audit(): assert lagging_node_state() != NodeLeecherService.State.Idle # Order request on all nodes except lagging one where they goes to stashed state - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) # Now catchup should end and lagging node starts processing stashed PPs # and resumes ordering diff --git a/plenum/test/audit_ledger/test_node_reg_in_ordered.py b/plenum/test/audit_ledger/test_node_reg_in_ordered.py index f8499753db..f795da68df 100644 --- a/plenum/test/audit_ledger/test_node_reg_in_ordered.py +++ b/plenum/test/audit_ledger/test_node_reg_in_ordered.py @@ -4,9 +4,9 @@ from plenum.test.helper import create_pre_prepare_no_bls, generate_state_root -def test_node_reg_in_ordered_from_audit(test_node): +def test_node_reg_in_ordered_from_audit(vdr_test_node): pre_prepare = create_pre_prepare_no_bls(state_root=generate_state_root(), pp_seq_no=1) - replica = test_node.master_replica + replica = vdr_test_node.master_replica key = (pre_prepare.viewNo, pre_prepare.ppSeqNo) replica._ordering_service.prePrepares[key] = pre_prepare replica._consensus_data.preprepared.append(preprepare_to_batch_id(pre_prepare)) @@ -16,14 +16,14 @@ def test_node_reg_in_ordered_from_audit(test_node): valid_digests=pre_prepare.reqIdr) three_pc_batch.node_reg = ["Alpha", "Beta", "Gamma", "Delta", "Eta"] three_pc_batch.primaries = ["Alpha", "Beta"] - test_node.write_manager.audit_b_handler.post_batch_applied(three_pc_batch) + vdr_test_node.write_manager.audit_b_handler.post_batch_applied(three_pc_batch) assert replica._ordering_service._get_node_reg_for_ordered(pre_prepare) == three_pc_batch.node_reg -def 
test_node_reg_in_ordered_from_audit_for_tree_txns(test_node): +def test_node_reg_in_ordered_from_audit_for_tree_txns(vdr_test_node): node_regs = {} - replica = test_node.master_replica + replica = vdr_test_node.master_replica node_reg = ["Alpha", "Beta", "Gamma", "Delta", "Eta"] for i in range(3): pp = create_pre_prepare_no_bls(state_root=generate_state_root(), @@ -37,7 +37,7 @@ def test_node_reg_in_ordered_from_audit_for_tree_txns(test_node): valid_digests=pp.reqIdr) three_pc_batch.node_reg = node_reg + ["Node{}".format(i + 10)] three_pc_batch.primaries = ["Alpha", "Beta"] - test_node.write_manager.audit_b_handler.post_batch_applied(three_pc_batch) + vdr_test_node.write_manager.audit_b_handler.post_batch_applied(three_pc_batch) node_regs[key] = three_pc_batch.node_reg for key in reversed(list(node_regs.keys())): diff --git a/plenum/test/audit_ledger/test_primaries_in_ordered.py b/plenum/test/audit_ledger/test_primaries_in_ordered.py index 47bf2df65f..f02b28afe1 100644 --- a/plenum/test/audit_ledger/test_primaries_in_ordered.py +++ b/plenum/test/audit_ledger/test_primaries_in_ordered.py @@ -3,9 +3,9 @@ from plenum.test.helper import create_pre_prepare_no_bls, generate_state_root -def test_primaries_in_ordered_from_audit(test_node): +def test_primaries_in_ordered_from_audit(vdr_test_node): pre_prepare = create_pre_prepare_no_bls(state_root=generate_state_root(), pp_seq_no=1) - replica = test_node.master_replica + replica = vdr_test_node.master_replica key = (pre_prepare.viewNo, pre_prepare.ppSeqNo) replica._ordering_service.prePrepares[key] = pre_prepare replica._consensus_data.preprepared.append(preprepare_to_batch_id(pre_prepare)) @@ -15,14 +15,14 @@ def test_primaries_in_ordered_from_audit(test_node): valid_digests=pre_prepare.reqIdr) three_pc_batch.primaries = ["Alpha", "Beta"] three_pc_batch.node_reg = ["Alpha", "Beta", "Gamma", "Delta", "Eta"] - test_node.write_manager.audit_b_handler.post_batch_applied(three_pc_batch) + vdr_test_node.write_manager.audit_b_handler.post_batch_applied(three_pc_batch) assert replica._ordering_service._get_primaries_for_ordered(pre_prepare) == three_pc_batch.primaries -def test_primaries_in_ordered_from_audit_for_tree_txns(test_node): +def test_primaries_in_ordered_from_audit_for_tree_txns(vdr_test_node): primaries = {} - replica = test_node.master_replica + replica = vdr_test_node.master_replica for i in range(3): pp = create_pre_prepare_no_bls(state_root=generate_state_root(), pp_seq_no=i) @@ -35,7 +35,7 @@ def test_primaries_in_ordered_from_audit_for_tree_txns(test_node): valid_digests=pp.reqIdr) three_pc_batch.primaries = ["Node{}".format(num) for num in range(i + 1)] three_pc_batch.node_reg = ["Alpha", "Beta", "Gamma", "Delta", "Eta"] - test_node.write_manager.audit_b_handler.post_batch_applied(three_pc_batch) + vdr_test_node.write_manager.audit_b_handler.post_batch_applied(three_pc_batch) primaries[key] = three_pc_batch.primaries for key in reversed(list(primaries.keys())): diff --git a/plenum/test/batching_3pc/catch-up/test_3pc_paused_during_catch_up.py b/plenum/test/batching_3pc/catch-up/test_3pc_paused_during_catch_up.py index e8bcf2b782..35b7091ac8 100644 --- a/plenum/test/batching_3pc/catch-up/test_3pc_paused_during_catch_up.py +++ b/plenum/test/batching_3pc/catch-up/test_3pc_paused_during_catch_up.py @@ -1,10 +1,10 @@ from plenum.common.messages.node_messages import Prepare from plenum.test.batching_3pc.helper import make_node_syncing, fail_on_execute_batch_on_master from plenum.test.test_node import getNonPrimaryReplicas -from 
plenum.test.helper import sdk_send_random_requests +from plenum.test.helper import vdr_send_random_requests -def test_sdk_no_ordering_during_syncup(tconf, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): +def test_sdk_no_ordering_during_syncup(tconf, looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): non_primary_replica = getNonPrimaryReplicas(txnPoolNodeSet, instId=0)[0] # Put non-primary Node to syncing state once first Prepare is recieved @@ -15,5 +15,5 @@ def test_sdk_no_ordering_during_syncup(tconf, looper, txnPoolNodeSet, sdk_pool_h # Send requests. The non-primary Node should not fail since no ordering is # called while syncing - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, tconf.Max3PCBatchSize) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, tconf.Max3PCBatchSize) looper.runFor(5) diff --git a/plenum/test/batching_3pc/catch-up/test_catchup_during_3pc.py b/plenum/test/batching_3pc/catch-up/test_catchup_during_3pc.py index 175cacd027..48abd9840a 100644 --- a/plenum/test/batching_3pc/catch-up/test_catchup_during_3pc.py +++ b/plenum/test/batching_3pc/catch-up/test_catchup_during_3pc.py @@ -4,8 +4,8 @@ from plenum.test import waits from plenum.test.delayers import cDelay from plenum.test.node_catchup.helper import waitNodeDataEquality, ensure_all_nodes_have_same_data -from plenum.test.helper import sdk_send_random_requests, check_last_ordered_3pc_on_master, \ - sdk_send_random_and_check, sdk_get_replies, max_3pc_batch_limits, assert_eq +from plenum.test.helper import vdr_send_random_requests, check_last_ordered_3pc_on_master, \ + vdr_send_random_and_check, vdr_get_replies, max_3pc_batch_limits, assert_eq from plenum.test.stasher import delay_rules from stp_core.loop.eventually import eventually @@ -16,7 +16,7 @@ def tconf(tconf): yield tconf -def test_catchup_during_3pc(tconf, looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle): +def test_catchup_during_3pc(tconf, looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle): ''' 1) Send 1 3PC batch + 2 reqs 2) Delay commits on one node @@ -31,8 +31,8 @@ def test_catchup_during_3pc(tconf, looper, txnPoolNodeSet, sdk_wallet_client, sd rest_nodes = txnPoolNodeSet[:-1] with delay_rules(lagging_node.nodeIbStasher, cDelay()): - sdk_reqs = sdk_send_random_requests(looper, sdk_pool_handle, - sdk_wallet_client, tconf.Max3PCBatchSize + 2) + sdk_reqs = vdr_send_random_requests(looper, vdr_pool_handle, + vdr_wallet_client, tconf.Max3PCBatchSize + 2) looper.run( eventually(check_last_ordered_3pc_on_master, rest_nodes, (0, 1)) @@ -49,9 +49,9 @@ def test_catchup_during_3pc(tconf, looper, txnPoolNodeSet, sdk_wallet_client, sd waitNodeDataEquality(looper, *txnPoolNodeSet, customTimeout=5) - sdk_get_replies(looper, sdk_reqs) + vdr_get_replies(looper, sdk_reqs) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 2 * tconf.Max3PCBatchSize - 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 2 * tconf.Max3PCBatchSize - 2) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/batching_3pc/catch-up/test_clearing_requests_after_catchup.py b/plenum/test/batching_3pc/catch-up/test_clearing_requests_after_catchup.py index 022d665a71..4fb99c6184 100644 --- a/plenum/test/batching_3pc/catch-up/test_clearing_requests_after_catchup.py +++ b/plenum/test/batching_3pc/catch-up/test_clearing_requests_after_catchup.py @@ -10,8 +10,8 @@ from stp_core.loop.eventually import eventually from 
plenum.test.delayers import cDelay, pDelay, ppDelay -from plenum.test.helper import sdk_send_batches_of_random_and_check, \ - sdk_send_batches_of_random, max_3pc_batch_limits, assertExp +from plenum.test.helper import vdr_send_batches_of_random_and_check, \ + vdr_send_batches_of_random, max_3pc_batch_limits, assertExp from plenum.test.checkpoints.conftest import chkFreqPatched, reqs_for_checkpoint @@ -35,21 +35,21 @@ def tconf(tconf): def test_clearing_forwarded_preprepared_request( looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward): + vdr_pool_handle, vdr_wallet_steward): # Case when backup ordered correctly, but primary had problems. # As a result, master will execute caughtup txns and will be removed # from requests queues behind_node = txnPoolNodeSet[-1] - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, CHK_FREQ, CHK_FREQ) + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, CHK_FREQ, CHK_FREQ) with delay_rules(behind_node.nodeIbStasher, pDelay(delay=sys.maxsize, instId=0), cDelay(delay=sys.maxsize, instId=0)): count = behind_node.spylog.count(behind_node.allLedgersCaughtUp) - sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, req_num, req_num) + vdr_send_batches_of_random(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, req_num, req_num) looper.run(eventually(node_caughtup, behind_node, count, retryWait=1)) @@ -61,13 +61,13 @@ def test_clearing_forwarded_preprepared_request( def test_deletion_non_forwarded_request( looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, tconf, tdir, allPluginsPath): + vdr_pool_handle, vdr_wallet_steward, tconf, tdir, allPluginsPath): behind_node = txnPoolNodeSet[-1] [behind_node.replicas.values()[1].discard_req_key(1, key) for key in behind_node.requests] behind_node.requests.clear() - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, CHK_FREQ, CHK_FREQ) + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, CHK_FREQ, CHK_FREQ) behind_node.quorums.propagate = Quorum(len(txnPoolNodeSet) + 1) with delay_rules(behind_node.nodeIbStasher, @@ -75,8 +75,8 @@ def test_deletion_non_forwarded_request( pDelay(delay=sys.maxsize), cDelay(delay=sys.maxsize)): count = behind_node.spylog.count(behind_node.allLedgersCaughtUp) - sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, req_num, req_num) + vdr_send_batches_of_random(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, req_num, req_num) looper.run(eventually(node_caughtup, behind_node, count, retryWait=1)) # We clear caughtup requests diff --git a/plenum/test/batching_3pc/catch-up/test_freeing_forwarded_not_preprepared_request.py b/plenum/test/batching_3pc/catch-up/test_freeing_forwarded_not_preprepared_request.py index 1f27144274..9ba27fa66f 100644 --- a/plenum/test/batching_3pc/catch-up/test_freeing_forwarded_not_preprepared_request.py +++ b/plenum/test/batching_3pc/catch-up/test_freeing_forwarded_not_preprepared_request.py @@ -8,8 +8,8 @@ from stp_core.loop.eventually import eventually from plenum.test.delayers import cDelay, pDelay, ppDelay, chk_delay -from plenum.test.helper import sdk_send_batches_of_random_and_check, \ - sdk_send_batches_of_random, max_3pc_batch_limits, assertExp +from plenum.test.helper import 
vdr_send_batches_of_random_and_check, \ + vdr_send_batches_of_random, max_3pc_batch_limits, assertExp from plenum.test.checkpoints.conftest import chkFreqPatched, reqs_for_checkpoint @@ -33,20 +33,20 @@ def tconf(tconf): def test_freeing_forwarded_not_preprepared_request( looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, tconf, tdir, allPluginsPath): + vdr_pool_handle, vdr_wallet_steward, tconf, tdir, allPluginsPath): behind_node = txnPoolNodeSet[-1] behind_node.requests.clear() - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, CHK_FREQ, CHK_FREQ) + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, CHK_FREQ, CHK_FREQ) count = behind_node.spylog.count(behind_node.allLedgersCaughtUp) with delay_rules(behind_node.nodeIbStasher, ppDelay(delay=sys.maxsize), pDelay(delay=sys.maxsize), cDelay(delay=sys.maxsize)): with delay_rules(behind_node.nodeIbStasher, chk_delay(delay=sys.maxsize)): - sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, req_num, req_num) + vdr_send_batches_of_random(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, req_num, req_num) looper.run(eventually(lambda: assertExp(len(behind_node.requests) == req_num))) # Start catchup with the quorum of Checkpoints looper.run(eventually(node_caughtup, behind_node, count, retryWait=1)) diff --git a/plenum/test/batching_3pc/catch-up/test_freeing_forwarded_preprepared_request.py b/plenum/test/batching_3pc/catch-up/test_freeing_forwarded_preprepared_request.py index 4af374cff3..270e2929a4 100644 --- a/plenum/test/batching_3pc/catch-up/test_freeing_forwarded_preprepared_request.py +++ b/plenum/test/batching_3pc/catch-up/test_freeing_forwarded_preprepared_request.py @@ -8,8 +8,8 @@ from stp_core.loop.eventually import eventually from plenum.test.delayers import cDelay, pDelay -from plenum.test.helper import sdk_send_batches_of_random_and_check, \ - sdk_send_batches_of_random, max_3pc_batch_limits, assertExp +from plenum.test.helper import vdr_send_batches_of_random_and_check, \ + vdr_send_batches_of_random, max_3pc_batch_limits, assertExp from plenum.test.checkpoints.conftest import chkFreqPatched, reqs_for_checkpoint @@ -33,19 +33,19 @@ def tconf(tconf): def test_freeing_forwarded_preprepared_request( looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward): + vdr_pool_handle, vdr_wallet_steward): # Case, when both backup and primary had problems behind_node = txnPoolNodeSet[-1] - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, CHK_FREQ, CHK_FREQ) + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, CHK_FREQ, CHK_FREQ) with delay_rules(behind_node.nodeIbStasher, pDelay(delay=sys.maxsize), cDelay(delay=sys.maxsize), ): count = behind_node.spylog.count(behind_node.allLedgersCaughtUp) - sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, req_num, req_num) + vdr_send_batches_of_random(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, req_num, req_num) looper.run(eventually(node_caughtup, behind_node, count, retryWait=1)) @@ -56,8 +56,8 @@ def _check(): assert all(r.executed for r in behind_node.requests.values() if behind_node.seqNoDB. 
get_by_full_digest(r.request.key)[1]) - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, CHK_FREQ, CHK_FREQ) + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, CHK_FREQ, CHK_FREQ) # Master and backup replicas do not stash new requests and successfully order them assert len(behind_node.requests) == 0 diff --git a/plenum/test/batching_3pc/catch-up/test_state_reverted_before_catchup.py b/plenum/test/batching_3pc/catch-up/test_state_reverted_before_catchup.py index 0c46bb8f51..0c94f3c2e4 100644 --- a/plenum/test/batching_3pc/catch-up/test_state_reverted_before_catchup.py +++ b/plenum/test/batching_3pc/catch-up/test_state_reverted_before_catchup.py @@ -2,12 +2,12 @@ from plenum.test.delayers import cDelay, cpDelay from plenum.test.test_node import getNonPrimaryReplicas from plenum.test.batching_3pc.helper import checkNodesHaveSameRoots -from plenum.test.helper import sdk_signed_random_requests, sdk_send_and_check, \ - sdk_send_random_requests, sdk_get_replies +from plenum.test.helper import vdr_signed_random_requests, vdr_send_and_check, \ + vdr_send_random_requests, vdr_get_replies def test_unordered_state_reverted_before_catchup( - tconf, looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle): + tconf, looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle): """ Check that unordered state is reverted before starting catchup: - save the initial state on a node @@ -26,8 +26,8 @@ def test_unordered_state_reverted_before_catchup( # send reqs and make sure we are at the same state - reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 10) - sdk_send_and_check(reqs, looper, txnPoolNodeSet, sdk_pool_handle) + reqs = vdr_signed_random_requests(looper, vdr_wallet_client, 10) + vdr_send_and_check(reqs, looper, txnPoolNodeSet, vdr_pool_handle) checkNodesHaveSameRoots(txnPoolNodeSet) # the state of the node before @@ -44,8 +44,8 @@ def test_unordered_state_reverted_before_catchup( non_primary_node.nodeIbStasher.delay(cpDelay()) # send requests - reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, tconf.Max3PCBatchSize) - sdk_get_replies(looper, reqs, timeout=40) + reqs = vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, tconf.Max3PCBatchSize) + vdr_get_replies(looper, reqs, timeout=40) committed_ledger_during_3pc = non_primary_node.getLedger( ledger_id).tree.root_hash diff --git a/plenum/test/batching_3pc/test_basic_batching.py b/plenum/test/batching_3pc/test_basic_batching.py index 9f275b7e22..4de7a262ec 100644 --- a/plenum/test/batching_3pc/test_basic_batching.py +++ b/plenum/test/batching_3pc/test_basic_batching.py @@ -2,63 +2,62 @@ import pytest from plenum.common.exceptions import UnauthorizedClientRequest, RequestRejectedException from plenum.test.batching_3pc.helper import checkNodesHaveSameRoots -from plenum.test.helper import sdk_send_random_requests, sdk_get_and_check_replies +from plenum.test.helper import vdr_send_random_requests, vdr_get_and_check_replies from plenum.common.exceptions import InvalidClientRequest -from plenum.test.helper import sdk_sign_request_from_dict, sdk_send_random_and_check +from plenum.test.helper import generate_invalid_unsigned_plenum_request, vdr_send_random_and_check from plenum.common.request import Request def testRequestStaticValidation(tconf, looper, txnPoolNodeSet, - sdk_wallet_client): + vdr_wallet_client): """ Check that for requests which fail static validation, REQNACK is sent :return: """ node = 
txnPoolNodeSet[0] - req = sdk_sign_request_from_dict(looper, sdk_wallet_client, {'something': 'nothing'}) - req = Request(**req) + req = generate_invalid_unsigned_plenum_request(vdr_wallet_client, {'something': 'nothing'}) with pytest.raises(InvalidClientRequest): node.doStaticValidation(req) def test3PCOverBatchWithThresholdReqs(tconf, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): """ Check that 3 phase commit happens when threshold number of requests are received and propagated. :return: """ - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf.Max3PCBatchSize) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, tconf.Max3PCBatchSize) def test3PCOverBatchWithLessThanThresholdReqs(tconf, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): """ Check that 3 phase commit happens when threshold number of requests are not received but threshold time has passed :return: """ - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf.Max3PCBatchSize - 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, tconf.Max3PCBatchSize - 1) def testTreeRootsCorrectAfterEachBatch(tconf, looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): """ Check if both state root and txn tree root are correct and same on each node after each batch :return: """ # Send 1 batch - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf.Max3PCBatchSize) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, tconf.Max3PCBatchSize) checkNodesHaveSameRoots(txnPoolNodeSet) # Send 2 batches - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 2 * tconf.Max3PCBatchSize) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 2 * tconf.Max3PCBatchSize) checkNodesHaveSameRoots(txnPoolNodeSet) def testRequestDynamicValidation(tconf, looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): """ Check that for requests which fail dynamic (state based) validation, REJECT is sent to the client @@ -78,12 +77,12 @@ def rejectingMethod(self, req, pp_time): for replica in node.replicas._replicas.values(): replica._ordering_service._do_dynamic_validation = types.MethodType(rejectingMethod, replica._ordering_service) - reqs = sdk_send_random_requests(looper, sdk_pool_handle, - sdk_wallet_client, + reqs = vdr_send_random_requests(looper, vdr_pool_handle, + vdr_wallet_client, tconf.Max3PCBatchSize) - sdk_get_and_check_replies(looper, reqs[:-1]) + vdr_get_and_check_replies(looper, reqs[:-1]) with pytest.raises(RequestRejectedException) as e: - sdk_get_and_check_replies(looper, reqs[-1:]) + vdr_get_and_check_replies(looper, reqs[-1:]) assert 'Simulated rejection' in e._excinfo[1].args[0] assert 'UnauthorizedClientRequest' in e._excinfo[1].args[0] diff --git a/plenum/test/batching_3pc/test_batch_rejection.py b/plenum/test/batching_3pc/test_batch_rejection.py index dd5ac20bad..c8d3ba4cbf 100644 --- a/plenum/test/batching_3pc/test_batch_rejection.py +++ b/plenum/test/batching_3pc/test_batch_rejection.py @@ -5,20 +5,20 @@ from stp_core.loop.eventually import eventually from plenum.common.constants import DOMAIN_LEDGER_ID from plenum.common.util import updateNamedTuple -from 
plenum.test.helper import sdk_send_random_requests, \ - sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_requests, \ + vdr_send_random_and_check from plenum.test.test_node import getNonPrimaryReplicas, \ getPrimaryReplica @pytest.fixture(scope="module") -def setup(tconf, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): +def setup(tconf, looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): # Patch the 3phase request sending method to send incorrect digest and pr, otherR = getPrimaryReplica(txnPoolNodeSet, instId=0), \ getNonPrimaryReplicas(txnPoolNodeSet, instId=0) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, tconf.Max3PCBatchSize) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, tconf.Max3PCBatchSize) stateRoot = pr._ordering_service.get_state_root_hash(DOMAIN_LEDGER_ID, to_str=False) origMethod = pr._ordering_service.create_3pc_batch @@ -33,7 +33,7 @@ def badMethod(self, ledgerId): return pp pr._ordering_service.create_3pc_batch = types.MethodType(badMethod, pr._ordering_service) - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, tconf.Max3PCBatchSize) return pr, otherR, stateRoot @@ -75,11 +75,11 @@ def testViewChangeAfterBatchRejected(viewChanged): def testMoreBatchesWillBeSentAfterViewChange(reverted, viewChanged, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client, tconf, looper): """ After retrying discarded batches, new batches are sent :return: """ - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, tconf.Max3PCBatchSize) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, tconf.Max3PCBatchSize) diff --git a/plenum/test/batching_3pc/test_batching_scenarios.py b/plenum/test/batching_3pc/test_batching_scenarios.py index 6a0e3ad740..7720d54ec4 100644 --- a/plenum/test/batching_3pc/test_batching_scenarios.py +++ b/plenum/test/batching_3pc/test_batching_scenarios.py @@ -4,7 +4,7 @@ from plenum.test.spy_helpers import getAllArgs from plenum.test.test_node import getPrimaryReplica, getNonPrimaryReplicas from plenum.test.view_change.conftest import perf_chk_patched -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check logger = getlogger() @@ -12,7 +12,7 @@ def testPrePrepareProcessedInOrder(perf_chk_patched, looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): """ A non-primary receives PRE-PREPARE out of order, it receives with ppSeqNo 2 earlier than it receives the one with ppSeqNo 1 but it stashes the one @@ -43,7 +43,7 @@ def specificPrePrepares(wrappedMsg): format(node)) node.nodeIbStasher.delay(specificPrePrepares) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, (ppsToDelay + 1) * tconf.Max3PCBatchSize) checkNodesHaveSameRoots(txnPoolNodeSet) diff --git a/plenum/test/blacklist/test_blacklist_client.py b/plenum/test/blacklist/test_blacklist_client.py index fd02c29041..62bfafc63a 100644 --- a/plenum/test/blacklist/test_blacklist_client.py +++ b/plenum/test/blacklist/test_blacklist_client.py @@ -4,29 +4,29 @@ from plenum.common.constants import CURRENT_PROTOCOL_VERSION from plenum.common.exceptions import 
RequestNackedException -from plenum.test.helper import sdk_random_request_objects, sdk_send_signed_requests, \ - sdk_get_and_check_replies +from plenum.test.helper import vdr_random_request_objects, vdr_send_signed_requests, \ + vdr_get_and_check_replies from stp_core.loop.eventually import eventually from plenum.test import waits # noinspection PyIncorrectDocstring,PyUnusedLocal,PyShadowingNames def testDoNotBlacklistClient(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle, + vdr_wallet_client, vdr_pool_handle, poolTxnClientNames): """ Client should be not be blacklisted by node on sending an unsigned request """ client_name = poolTxnClientNames[0] - _, did = sdk_wallet_client + _, did = vdr_wallet_client # No node should blacklist the client - req_obj = sdk_random_request_objects(1, identifier=did, + req_obj = vdr_random_request_objects(1, identifier=did, protocol_version=CURRENT_PROTOCOL_VERSION)[0] - reqs = sdk_send_signed_requests(sdk_pool_handle, [json.dumps(req_obj.as_dict)]) + reqs = vdr_send_signed_requests(vdr_pool_handle, [req_obj], looper) with pytest.raises(RequestNackedException, match='MissingSignature'): - sdk_get_and_check_replies(looper, reqs) + vdr_get_and_check_replies(looper, reqs) def chk(): for node in txnPoolNodeSet: diff --git a/plenum/test/bls/helper.py b/plenum/test/bls/helper.py index 8d689a8450..2b112cfe22 100644 --- a/plenum/test/bls/helper.py +++ b/plenum/test/bls/helper.py @@ -18,11 +18,11 @@ MULTI_SIGNATURE_PARTICIPANTS, MULTI_SIGNATURE_SIGNATURE, MULTI_SIGNATURE_VALUE from plenum.common.keygen_utils import init_bls_keys from plenum.common.util import hexToFriendly -from plenum.test.helper import sdk_send_random_and_check, create_commit_bls_sig -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.helper import vdr_send_random_and_check, create_commit_bls_sig +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.node_catchup.helper import waitNodeDataEquality -from plenum.test.pool_transactions.helper import sdk_send_update_node, \ - sdk_pool_refresh +from plenum.test.pool_transactions.helper import vdr_send_update_node, \ + vdr_pool_refresh from stp_core.common.log import getlogger logger = getlogger() @@ -56,7 +56,7 @@ def sdk_check_bls_multi_sig_after_send(looper, txnPoolNodeSet, # Using loop to avoid 3pc batching state_roots = [] for i in range(number_of_requests): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_handle, 1) waitNodeDataEquality(looper, txnPoolNodeSet[0], *txnPoolNodeSet[:-1]) state_roots.append( @@ -134,7 +134,7 @@ def sdk_change_bls_key(looper, txnPoolNodeSet, key_in_txn = new_bls or new_blspk bls_key_proof = new_key_proof or key_proof node_dest = hexToFriendly(node.nodestack.verhex) - sdk_send_update_node(looper, sdk_wallet_steward, + vdr_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle, node_dest, node.name, None, None, @@ -147,9 +147,9 @@ def sdk_change_bls_key(looper, txnPoolNodeSet, poolSetExceptOne.remove(node) waitNodeDataEquality(looper, node, *poolSetExceptOne) if pool_refresh: - sdk_pool_refresh(looper, sdk_pool_handle) + vdr_pool_refresh(looper, sdk_pool_handle) if check_functional: - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) return new_blspk @@ -289,7 +289,7 @@ def 
update_bls_keys_no_proof(node_index, sdk_wallet_stewards, sdk_pool_handle, l sdk_wallet_steward = sdk_wallet_stewards[node_index] new_blspk, key_proof = init_bls_keys(node.keys_dir, node.name) node_dest = hexToFriendly(node.nodestack.verhex) - sdk_send_update_node(looper, sdk_wallet_steward, + vdr_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle, node_dest, node.name, None, None, @@ -300,7 +300,7 @@ def update_bls_keys_no_proof(node_index, sdk_wallet_stewards, sdk_pool_handle, l poolSetExceptOne = list(txnPoolNodeSet) poolSetExceptOne.remove(node) waitNodeDataEquality(looper, node, *poolSetExceptOne) - sdk_pool_refresh(looper, sdk_pool_handle) + vdr_pool_refresh(looper, sdk_pool_handle) return new_blspk diff --git a/plenum/test/bls/test_add_bls_key.py b/plenum/test/bls/test_add_bls_key.py index c009fe858a..a597c73453 100644 --- a/plenum/test/bls/test_add_bls_key.py +++ b/plenum/test/bls/test_add_bls_key.py @@ -13,8 +13,8 @@ def test_add_bls_one_node(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_stewards, sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_stewards, vdr_wallet_client): ''' Added BLS key for 1st Node; do not expect that BLS multi-sigs are applied since no consensus (n-f) @@ -22,16 +22,16 @@ def test_add_bls_one_node(looper, check_update_bls_key(node_num=0, saved_multi_sigs_count=0, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle) + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle) def test_add_bls_two_nodes(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_stewards, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_stewards, + vdr_wallet_client): ''' Added BLS key for 1st and 2d Nodes; do not expect that BLS multi-sigs are applied since no consensus (n-f) @@ -39,16 +39,16 @@ def test_add_bls_two_nodes(looper, check_update_bls_key(node_num=1, saved_multi_sigs_count=0, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle) + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle) def test_add_bls_three_nodes(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_stewards, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_stewards, + vdr_wallet_client): ''' Added BLS key for 1st, 2d and 3d Nodes; expect that BLS multi-sigs are applied since we have consensus now (3=n-f) @@ -68,16 +68,16 @@ def patched_set_validators(self, validators): check_update_bls_key(node_num=2, saved_multi_sigs_count=4, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle) + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle) def test_add_bls_all_nodes(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_stewards, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_stewards, + vdr_wallet_client): ''' Eventually added BLS key for all Nodes; expect that BLS multi-sigs are applied since we have consensus now (4 > n-f) @@ -85,6 +85,6 @@ def test_add_bls_all_nodes(looper, check_update_bls_key(node_num=3, saved_multi_sigs_count=4, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - 
sdk_pool_handle=sdk_pool_handle) + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle) diff --git a/plenum/test/bls/test_add_incorrect_bls_key.py b/plenum/test/bls/test_add_incorrect_bls_key.py index 74b56f7752..1f083ee81c 100644 --- a/plenum/test/bls/test_add_incorrect_bls_key.py +++ b/plenum/test/bls/test_add_incorrect_bls_key.py @@ -10,9 +10,9 @@ def test_add_incorrect_bls_one_node(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_stewards, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_stewards, + vdr_wallet_client): ''' Added wrong BLS key for 1st Node; do not expect that BLS multi-sigs are applied @@ -20,17 +20,17 @@ def test_add_incorrect_bls_one_node(looper, check_update_bls_key(node_num=0, saved_multi_sigs_count=0, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle, + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle, add_wrong=True) def test_add_incorrect_bls_two_nodes(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_stewards, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_stewards, + vdr_wallet_client): ''' Added wrong BLS key for 1st and 2d Nodes; do not expect that BLS multi-sigs are applied @@ -38,17 +38,17 @@ def test_add_incorrect_bls_two_nodes(looper, check_update_bls_key(node_num=1, saved_multi_sigs_count=0, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle, + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle, add_wrong=True) def test_add_incorrect_bls_three_nodes(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_stewards, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_stewards, + vdr_wallet_client): ''' Added wrong BLS key for 1-3 Nodes; do not expect that BLS multi-sigs are applied @@ -60,17 +60,17 @@ def test_add_incorrect_bls_three_nodes(looper, check_update_bls_key(node_num=2, saved_multi_sigs_count=0, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle, + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle, add_wrong=True) def test_add_incorrect_bls_all_nodes(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_stewards, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_stewards, + vdr_wallet_client): ''' Added wrong BLS key for all Nodes; Still do not expect that BLS multi-sigs are applied @@ -78,7 +78,7 @@ def test_add_incorrect_bls_all_nodes(looper, check_update_bls_key(node_num=3, saved_multi_sigs_count=0, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle, + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle, add_wrong=True) diff --git a/plenum/test/bls/test_bls_key_registry_pool_manager.py b/plenum/test/bls/test_bls_key_registry_pool_manager.py index b5fa017150..0e0bafede5 100644 --- a/plenum/test/bls/test_bls_key_registry_pool_manager.py +++ b/plenum/test/bls/test_bls_key_registry_pool_manager.py @@ -56,8 +56,8 @@ def 
test_get_key_for_old_root_keys_changed(bls_key_register_ledger, txnPoolNodeSet, node, looper, - sdk_wallet_steward, - sdk_pool_handle): + vdr_wallet_steward, + vdr_pool_handle): old_bls_key = get_payload_data(pool_node_txns[0])[DATA][BLS_KEY] new_bls_key, key_proof = init_bls_keys(node.keys_dir, node.name) old_pool_root_hash = node.poolManager.state.committedHeadHash @@ -66,8 +66,8 @@ def test_get_key_for_old_root_keys_changed(bls_key_register_ledger, sdk_change_bls_key(looper, txnPoolNodeSet, node, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, add_wrong=False, new_bls=new_bls_key, new_key_proof=key_proof) diff --git a/plenum/test/bls/test_bls_not_depend_on_node_reg.py b/plenum/test/bls/test_bls_not_depend_on_node_reg.py index 7ab439d0c4..7a3cd49c52 100644 --- a/plenum/test/bls/test_bls_not_depend_on_node_reg.py +++ b/plenum/test/bls/test_bls_not_depend_on_node_reg.py @@ -7,7 +7,7 @@ from plenum.test.pool_transactions.helper import demote_node from plenum.test.test_node import TestNode, checkNodesConnected, ensureElectionsDone, ensure_node_disconnected -from plenum.test.helper import sdk_send_batches_of_random_and_check +from plenum.test.helper import vdr_send_batches_of_random_and_check from plenum.common.config_helper import PNodeConfigHelper from plenum.common.types import f @@ -17,9 +17,9 @@ def test_bls_not_depend_on_node_reg(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 3, 3) + vdr_pool_handle, vdr_wallet_client): + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 3, 3) node = txnPoolNodeSet[2] last_pre_prepare = \ @@ -44,10 +44,10 @@ def test_bls_not_depend_on_node_reg(looper, txnPoolNodeSet, def test_order_after_demote_and_restart(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, tdir, tconf, allPluginsPath, - sdk_wallet_stewards): - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 3, 3) + vdr_pool_handle, vdr_wallet_client, tdir, tconf, allPluginsPath, + vdr_wallet_stewards): + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 3, 3) primary_node = txnPoolNodeSet[0] node_to_stop = txnPoolNodeSet[1] @@ -59,7 +59,7 @@ def test_order_after_demote_and_restart(looper, txnPoolNodeSet, looper.removeProdable(node_to_stop) ensure_node_disconnected(looper, node_to_stop, txnPoolNodeSet, timeout=2) - demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, node_to_demote) + demote_node(looper, vdr_wallet_stewards[2], vdr_pool_handle, node_to_demote) config_helper = PNodeConfigHelper(node_to_stop.name, tconf, chroot=tdir) restarted_node = TestNode(node_to_stop.name, config_helper=config_helper, config=tconf, @@ -70,8 +70,8 @@ def test_order_after_demote_and_restart(looper, txnPoolNodeSet, looper.run(checkNodesConnected(txnPoolNodeSet)) ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet, check_primaries=False) - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1, 1) + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1, 1) def get_current_bls_keys(node): bls_keys_raw_dict = node.master_replica._bls_bft_replica._bls_bft.bls_key_register._current_bls_keys diff --git a/plenum/test/bls/test_commit_signature_validation_integration.py 
b/plenum/test/bls/test_commit_signature_validation_integration.py index 240764fc65..d128a2510a 100644 --- a/plenum/test/bls/test_commit_signature_validation_integration.py +++ b/plenum/test/bls/test_commit_signature_validation_integration.py @@ -3,10 +3,10 @@ from orderedset._orderedset import OrderedSet from plenum.common.constants import STEWARD_STRING -from plenum.test.helper import sdk_send_random_request, get_key_from_req, sdk_get_and_check_replies, \ - sdk_send_random_and_check -from plenum.test.pool_transactions.helper import prepare_node_request, \ - sdk_sign_and_send_prepared_request, sdk_add_new_nym, prepare_new_node_data +from plenum.test.helper import vdr_send_random_request, get_key_from_req, vdr_get_and_check_replies, \ + vdr_send_random_and_check +from plenum.test.pool_transactions.helper import vdr_prepare_node_request, \ + vdr_sign_and_send_prepared_request, vdr_add_new_nym, prepare_new_node_data from stp_core.loop.eventually import eventually @@ -29,9 +29,9 @@ def ord_delay(nodes): def test_commit_signature_validation_integration(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_steward, + vdr_wallet_client, tconf, tdir): ''' @@ -45,13 +45,13 @@ def test_commit_signature_validation_integration(looper, fast_nodes = txnPoolNodeSet[:2] slow_nodes = txnPoolNodeSet[2:] - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 1) # create new steward - new_steward_wallet_handle = sdk_add_new_nym(looper, - sdk_pool_handle, - sdk_wallet_steward, + new_steward_wallet_handle = vdr_add_new_nym(looper, + vdr_pool_handle, + vdr_wallet_steward, alias="testClientSteward945", role=STEWARD_STRING) @@ -60,8 +60,7 @@ def test_commit_signature_validation_integration(looper, # create node request to add new demote node _, steward_did = new_steward_wallet_handle - node_request = looper.loop.run_until_complete( - prepare_node_request(steward_did, + node_request = vdr_prepare_node_request(steward_did, new_node_name="new_node", clientIp=clientIp, clientPort=clientPort, @@ -70,12 +69,12 @@ def test_commit_signature_validation_integration(looper, bls_key=bls_key, sigseed=sigseed, services=[], - key_proof=key_proof)) + key_proof=key_proof) first_ordered = txnPoolNodeSet[0].master_last_ordered_3PC with ord_delay(slow_nodes): - request1 = sdk_sign_and_send_prepared_request(looper, new_steward_wallet_handle, - sdk_pool_handle, node_request) + request1 = vdr_sign_and_send_prepared_request(looper, new_steward_wallet_handle, + vdr_pool_handle, node_request) key1 = get_key_from_req(request1[0]) @@ -93,7 +92,7 @@ def check_fast_nodes_ordered_request(): looper.run(eventually(check_fast_nodes_ordered_request)) - request2 = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) + request2 = vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_client) looper.run(eventually(check_nodes_receive_pp, first_ordered[0], first_ordered[1] + 2)) def check_nodes_receive_commits(view_no, seq_no): @@ -101,5 +100,5 @@ def check_nodes_receive_commits(view_no, seq_no): assert len(node.master_replica._ordering_service.commits[view_no, seq_no].voters) >= node.f + 1 looper.run(eventually(check_nodes_receive_commits, first_ordered[0], first_ordered[1] + 2)) - sdk_get_and_check_replies(looper, [request1]) - sdk_get_and_check_replies(looper, [request2]) + vdr_get_and_check_replies(looper, [request1]) + 
vdr_get_and_check_replies(looper, [request2]) diff --git a/plenum/test/bls/test_get_state_proof.py b/plenum/test/bls/test_get_state_proof.py index 7f5684f9a1..2a3b10c71e 100644 --- a/plenum/test/bls/test_get_state_proof.py +++ b/plenum/test/bls/test_get_state_proof.py @@ -2,19 +2,19 @@ from plenum.test.buy_handler import BuyHandler from plenum.test.constants import GET_BUY -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.common.types import f from plenum.common.constants import ROOT_HASH -def test_get_state_value_and_proof(looper, sdk_wallet_steward, - sdk_pool_handle, txnPoolNodeSet): +def test_get_state_value_and_proof(looper, vdr_wallet_steward, + vdr_pool_handle, txnPoolNodeSet): node = txnPoolNodeSet[0] req_handler = node.read_manager.request_handlers[GET_BUY] - req1, _ = sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 1)[0] + req1, _ = vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, 1)[0] # Save headHash after first request head1 = req_handler.state.headHash - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, 1) # Save headHash after second request head2 = req_handler.state.headHash # Build path to first request diff --git a/plenum/test/bls/test_no_state_proof.py b/plenum/test/bls/test_no_state_proof.py index 9cf753e7f8..c18f4d9cb7 100644 --- a/plenum/test/bls/test_no_state_proof.py +++ b/plenum/test/bls/test_no_state_proof.py @@ -4,16 +4,15 @@ from plenum.common.util import get_utc_epoch from plenum.test.buy_handler import BuyHandler from plenum.test.constants import GET_BUY -from plenum.test.helper import sdk_json_to_request_object, sdk_signed_random_requests +from plenum.test.helper import vdr_json_to_plenum_request_object, vdr_signed_random_requests nodeCount = 4 nodes_wth_bls = 0 def test_make_proof_bls_disabled(looper, txnPoolNodeSet, - sdk_wallet_client): - req = json.loads( - sdk_signed_random_requests(looper, sdk_wallet_client, 1)[0]) + vdr_wallet_client): + req = json.loads(vdr_signed_random_requests(looper, vdr_wallet_client, 1)[0].body) for node in txnPoolNodeSet: req_handler = node.read_manager.request_handlers[GET_BUY] @@ -23,17 +22,13 @@ def test_make_proof_bls_disabled(looper, txnPoolNodeSet, def test_make_result_bls_disabled(looper, txnPoolNodeSet, - sdk_wallet_client): - req = json.loads( - sdk_signed_random_requests(looper, sdk_wallet_client, 1)[0]) + vdr_wallet_client): + req = json.loads(vdr_signed_random_requests(looper, vdr_wallet_client, 1)[0].body) for node in txnPoolNodeSet: req_handler = node.read_manager.request_handlers[GET_BUY] key = BuyHandler.prepare_buy_key(req['identifier'], req['reqId']) _, _, _, proof = req_handler.lookup(key, with_proof=True) - result = req_handler.make_result(sdk_json_to_request_object(req), - {TXN_TYPE: "buy"}, - 2, - get_utc_epoch(), - proof) + custom_req = vdr_json_to_plenum_request_object(req) + result = req_handler.make_result(custom_req, {TXN_TYPE: "buy"}, 2, get_utc_epoch(), proof) assert STATE_PROOF not in result diff --git a/plenum/test/bls/test_send_txns_bls_consensus.py b/plenum/test/bls/test_send_txns_bls_consensus.py index 6b2bb931c3..95c0fe34aa 100644 --- a/plenum/test/bls/test_send_txns_bls_consensus.py +++ b/plenum/test/bls/test_send_txns_bls_consensus.py @@ -12,7 +12,7 @@ def test_each_node_has_bls(txnPoolNodeSet): def 
test_send_txns_bls_consensus(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): # make sure that we have commits from all nodes, and have 5 of 7 (n-f) BLS sigs there is enough # otherwise we may have 3 commits, but 1 of them may be without BLS, so we will Order this txn, but without multi-sig for node in txnPoolNodeSet: @@ -22,5 +22,5 @@ def test_send_txns_bls_consensus(looper, txnPoolNodeSet, # we expect that although not all nodes can sign with BLS (because not all nodes have BLS keys), # we get multi-sig on all nodes (since all nodes can verify signatures) sdk_check_bls_multi_sig_after_send(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client, saved_multi_sigs_count=nodeCount) diff --git a/plenum/test/bls/test_send_txns_bls_less_than_consensus.py b/plenum/test/bls/test_send_txns_bls_less_than_consensus.py index f4a7654cda..f74f1f5f98 100644 --- a/plenum/test/bls/test_send_txns_bls_less_than_consensus.py +++ b/plenum/test/bls/test_send_txns_bls_less_than_consensus.py @@ -12,10 +12,10 @@ def test_each_node_has_bls(txnPoolNodeSet): def test_send_txns_bls_less_than_consensus(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): # make sure that we have commits from all nodes, and have 4 of 7 ( < n-f) BLS sigs there is not enough for node in txnPoolNodeSet: node.quorums.commit = Quorum(nodeCount) sdk_check_bls_multi_sig_after_send(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client, saved_multi_sigs_count=0) diff --git a/plenum/test/bls/test_send_txns_full_bls.py b/plenum/test/bls/test_send_txns_full_bls.py index 82134e6533..3223f70658 100644 --- a/plenum/test/bls/test_send_txns_full_bls.py +++ b/plenum/test/bls/test_send_txns_full_bls.py @@ -11,7 +11,7 @@ def test_each_node_has_bls(txnPoolNodeSet): def test_send_txns_full_bls(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): sdk_check_bls_multi_sig_after_send(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client, saved_multi_sigs_count=nodeCount) diff --git a/plenum/test/bls/test_send_txns_no_bls.py b/plenum/test/bls/test_send_txns_no_bls.py index 8d48d3b2a3..c6a98a8133 100644 --- a/plenum/test/bls/test_send_txns_no_bls.py +++ b/plenum/test/bls/test_send_txns_no_bls.py @@ -11,7 +11,7 @@ def test_each_node_has_bls(txnPoolNodeSet): def test_send_txns_no_bls(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): sdk_check_bls_multi_sig_after_send(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client, saved_multi_sigs_count=0) diff --git a/plenum/test/bls/test_sign_validation_for_key_proof_exist.py b/plenum/test/bls/test_sign_validation_for_key_proof_exist.py index 6d6baebee8..eb881f0512 100644 --- a/plenum/test/bls/test_sign_validation_for_key_proof_exist.py +++ b/plenum/test/bls/test_sign_validation_for_key_proof_exist.py @@ -14,9 +14,9 @@ def validate_bls_signature_without_key_proof(request): def test_switched_off_sign_validation_for_key_proof_exist(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_stewards, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_stewards, + vdr_wallet_client, monkeypatch, validate_bls_signature_without_key_proof): ''' @@ -30,7 +30,7 @@ def test_switched_off_sign_validation_for_key_proof_exist(looper, 
monkeypatch.setattr(n.write_manager.request_handlers[NODE][0], 'static_validation', lambda req: True) - new_blspk = update_bls_keys_no_proof(0, sdk_wallet_stewards, sdk_pool_handle, looper, txnPoolNodeSet) + new_blspk = update_bls_keys_no_proof(0, vdr_wallet_stewards, vdr_pool_handle, looper, txnPoolNodeSet) monkeypatch.undo() with update_validate_bls_signature_without_key_proof(txnPoolNodeSet, validate_bls_signature_without_key_proof): diff --git a/plenum/test/bls/test_sign_validation_for_key_proof_exist_ordering.py b/plenum/test/bls/test_sign_validation_for_key_proof_exist_ordering.py index 507efdf8fe..32aaae821d 100644 --- a/plenum/test/bls/test_sign_validation_for_key_proof_exist_ordering.py +++ b/plenum/test/bls/test_sign_validation_for_key_proof_exist_ordering.py @@ -4,7 +4,7 @@ from plenum.common.exceptions import PoolLedgerTimeoutException from plenum.test.bls.helper import update_bls_keys_no_proof, \ update_validate_bls_signature_without_key_proof -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check nodes_wth_bls = 0 @@ -16,9 +16,9 @@ def validate_bls_signature_without_key_proof(request): def test_ordering_with_nodes_have_not_bls_key_proofs(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_stewards, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_stewards, + vdr_wallet_client, monkeypatch, validate_bls_signature_without_key_proof): ''' @@ -34,14 +34,14 @@ def test_ordering_with_nodes_have_not_bls_key_proofs(looper, 'static_validation', lambda req: True) for node_index in range(0, len(txnPoolNodeSet)): - update_bls_keys_no_proof(node_index, sdk_wallet_stewards, sdk_pool_handle, looper, txnPoolNodeSet) + update_bls_keys_no_proof(node_index, vdr_wallet_stewards, vdr_pool_handle, looper, txnPoolNodeSet) monkeypatch.undo() with update_validate_bls_signature_without_key_proof(txnPoolNodeSet, validate_bls_signature_without_key_proof): if validate_bls_signature_without_key_proof: - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_stewards[3], 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_stewards[3], 1) else: with pytest.raises(PoolLedgerTimeoutException): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_stewards[3], 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_stewards[3], 1) diff --git a/plenum/test/bls/test_state_proof.py b/plenum/test/bls/test_state_proof.py index 95f87b87c1..ff0a140569 100644 --- a/plenum/test/bls/test_state_proof.py +++ b/plenum/test/bls/test_state_proof.py @@ -1,3 +1,4 @@ +import json from plenum.common.constants import ROOT_HASH, MULTI_SIGNATURE, PROOF_NODES, TXN_TYPE, DATA, TXN_TIME, STATE_PROOF, \ MULTI_SIGNATURE_VALUE, MULTI_SIGNATURE_PARTICIPANTS, MULTI_SIGNATURE_SIGNATURE, \ MULTI_SIGNATURE_VALUE_LEDGER_ID, \ @@ -12,8 +13,8 @@ from plenum.test.buy_handler import BuyHandler from plenum.test.constants import GET_BUY from plenum.test.helper import wait_for_requests_ordered, \ - randomOperation, sdk_send_random_requests, sdk_json_couples_to_request_list, sdk_send_random_and_check, \ - sdk_json_to_request_object + randomOperation, vdr_send_random_requests, vdr_json_couples_to_request_list, vdr_send_random_and_check, \ + vdr_json_to_request_object, vdr_json_to_plenum_request_object nodeCount = 4 nodes_wth_bls = 4 @@ -46,10 +47,10 @@ def check_result(txnPoolNodeSet, req, should_have_proof): def test_make_proof_bls_enabled(looper, txnPoolNodeSet, - 
sdk_pool_handle, sdk_wallet_client): - reqs = sdk_json_couples_to_request_list( - sdk_send_random_requests( - looper, sdk_pool_handle, sdk_wallet_client, 1)) + vdr_pool_handle, vdr_wallet_client): + reqs = vdr_json_couples_to_request_list( + vdr_send_random_requests( + looper, vdr_pool_handle, vdr_wallet_client, 1)) wait_for_requests_ordered(looper, txnPoolNodeSet, reqs) req = reqs[0] @@ -85,9 +86,9 @@ def test_make_proof_bls_enabled(looper, txnPoolNodeSet, def test_make_result_bls_enabled(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): - req_dict, _ = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)[0] - req = sdk_json_to_request_object(req_dict) + vdr_pool_handle, vdr_wallet_client): + req_dict, _ = vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1)[0] + req = vdr_json_to_plenum_request_object(req_dict) wait_for_requests_ordered(looper, txnPoolNodeSet, [req]) assert req.protocolVersion @@ -117,12 +118,12 @@ def test_make_result_protocol_version_less_than_state_proof(looper, def test_proof_in_write_reply(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): - resp = sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_pool_handle, vdr_wallet_client): + resp = vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) req = resp[0][0] - result = resp[0][1]['result'] + result = json.loads(resp[0][1]["Alpha"])['result'] # All nodes now return a reply from the request. All same info select alpha for test purposes assert result assert get_type(result) == "buy" @@ -154,9 +155,9 @@ def test_proof_in_write_reply(looper, txnPoolNodeSet, def test_make_proof_committed_head_used(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): - req_dict, _ = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)[0] - req = sdk_json_to_request_object(req_dict) + vdr_pool_handle, vdr_wallet_client): + req_dict, _ = vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1)[0] + req = vdr_json_to_plenum_request_object(req_dict) wait_for_requests_ordered(looper, txnPoolNodeSet, [req]) key = BuyHandler.prepare_buy_key(req.identifier, req.reqId) diff --git a/plenum/test/bls/test_update_bls_key.py b/plenum/test/bls/test_update_bls_key.py index 910b764b42..b9e6461b7e 100644 --- a/plenum/test/bls/test_update_bls_key.py +++ b/plenum/test/bls/test_update_bls_key.py @@ -8,60 +8,60 @@ # rotating BLS keys one by one, eventually we will have all keys changed def test_update_bls_one_node(looper, txnPoolNodeSet, - sdk_wallet_stewards, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_stewards, + vdr_wallet_client, + vdr_pool_handle): ''' Rotated BLS key for 1st node; BLS multi-signatures must be calculated for all Nodes. ''' check_update_bls_key(node_num=0, saved_multi_sigs_count=4, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle) + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle) def test_update_bls_two_nodes(looper, txnPoolNodeSet, - sdk_wallet_stewards, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_stewards, + vdr_wallet_client, + vdr_pool_handle): ''' Rotated BLS key for 1st and 2d nodes; BLS multi-signatures must be calculated for all Nodes. 
''' check_update_bls_key(node_num=1, saved_multi_sigs_count=4, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle) + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle) def test_update_bls_three_nodes(looper, txnPoolNodeSet, - sdk_wallet_stewards, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_stewards, + vdr_wallet_client, + vdr_pool_handle): ''' Rotated BLS key for 1-3 Nodes; BLS multi-signatures must be calculated for all Nodes. ''' check_update_bls_key(node_num=2, saved_multi_sigs_count=4, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle) + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle) def test_update_bls_all_nodes(looper, txnPoolNodeSet, - sdk_wallet_stewards, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_stewards, + vdr_wallet_client, + vdr_pool_handle): ''' Rotated BLS key for all Nodes; BLS multi-signatures must be calculated for all Nodes. ''' check_update_bls_key(node_num=3, saved_multi_sigs_count=4, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle) + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle) diff --git a/plenum/test/bls/test_update_incorrect_bls_key.py b/plenum/test/bls/test_update_incorrect_bls_key.py index d7892dbacb..4354f18b1b 100644 --- a/plenum/test/bls/test_update_incorrect_bls_key.py +++ b/plenum/test/bls/test_update_incorrect_bls_key.py @@ -12,9 +12,9 @@ # rotating BLS keys one by one, eventually we will have all keys changed def test_update_incorrect_bls_one_node(looper, txnPoolNodeSet, - sdk_wallet_stewards, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_stewards, + vdr_wallet_client, + vdr_pool_handle): ''' Updated with wrong BLS key for 1st Node; Expect that BLS multi-sigs are applied since we have 3 correct signatures @@ -32,55 +32,55 @@ def patched_set_validators(self, validators): node.quorums.commit = Quorum(nodeCount) check_update_bls_key(node_num=0, saved_multi_sigs_count=4, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle, + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle, add_wrong=True) def test_update_incorrect_bls_two_nodes(looper, txnPoolNodeSet, - sdk_wallet_stewards, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_stewards, + vdr_wallet_client, + vdr_pool_handle): ''' Updated with wrong BLS key for 1st and 2d Nodes; do not expect that BLS multi-sigs are applied (we have less than n-f correct BLS sigs) ''' check_update_bls_key(node_num=1, saved_multi_sigs_count=0, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle, + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle, add_wrong=True) def test_update_incorrect_bls_three_nodes(looper, txnPoolNodeSet, - sdk_wallet_stewards, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_stewards, + 
vdr_wallet_client, + vdr_pool_handle): ''' Updated with wrong BLS keys 1-3 Nodes; do not expect that BLS multi-sigs are applied (we have less than n-f correct BLS sigs) ''' check_update_bls_key(node_num=2, saved_multi_sigs_count=0, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle, + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle, add_wrong=True) def test_update_incorrect_bls_all_nodes(looper, txnPoolNodeSet, - sdk_wallet_stewards, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_stewards, + vdr_wallet_client, + vdr_pool_handle): ''' Updated with wrong BLS keys all Nodes; do not expect that BLS multi-sigs are applied (we have less than n-f correct BLS sigs) ''' check_update_bls_key(node_num=3, saved_multi_sigs_count=0, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle, + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle, add_wrong=True) diff --git a/plenum/test/checkpoints/test_backup_replica_resumes_ordering_on_lag_in_checkpoints.py b/plenum/test/checkpoints/test_backup_replica_resumes_ordering_on_lag_in_checkpoints.py index 4b6209ce99..b4f7cdcc04 100644 --- a/plenum/test/checkpoints/test_backup_replica_resumes_ordering_on_lag_in_checkpoints.py +++ b/plenum/test/checkpoints/test_backup_replica_resumes_ordering_on_lag_in_checkpoints.py @@ -7,7 +7,7 @@ from plenum.test import waits from plenum.test.checkpoints.helper import check_num_quorumed_received_checkpoints, check_num_unstable_checkpoints from plenum.test.delayers import cDelay, chk_delay, msg_rep_delay -from plenum.test.helper import sdk_send_random_requests, assertExp, sdk_send_random_and_check, assert_eq, get_pp_seq_no, \ +from plenum.test.helper import vdr_send_random_requests, assertExp, vdr_send_random_and_check, assert_eq, get_pp_seq_no, \ check_last_ordered_3pc_backup from stp_core.loop.eventually import eventually @@ -30,18 +30,18 @@ def tconf(tconf): def test_backup_replica_resumes_ordering_on_lag_in_checkpoints( looper, chkFreqPatched, reqs_for_checkpoint, - one_replica_and_others_in_backup_instance, - sdk_pool_handle, sdk_wallet_client, view_change_done, txnPoolNodeSet): + vdr_one_replica_and_others_in_backup_instance, + vdr_pool_handle, vdr_wallet_client, vdr_view_change_done, txnPoolNodeSet): """ Verifies resumption of ordering 3PC-batches on a backup replica on detection of a lag in checkpoints """ - slow_replica, other_replicas = one_replica_and_others_in_backup_instance + slow_replica, other_replicas = vdr_one_replica_and_others_in_backup_instance view_no = slow_replica.viewNo batches_count = slow_replica.last_ordered_3pc[1] # Send a request and ensure that the replica orders the batch for it - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1) batches_count += 1 low_watermark = slow_replica.h @@ -58,7 +58,7 @@ def test_backup_replica_resumes_ordering_on_lag_in_checkpoints( # Send a request for which the replica will not be able to order the batch # due to an insufficient count of Commits - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1) 
looper.runFor(waits.expectedTransactionExecutionTime(nodeCount)) # Recover reception of Commits @@ -68,7 +68,7 @@ def test_backup_replica_resumes_ordering_on_lag_in_checkpoints( # Send requests but in a quantity insufficient # for catch-up number of checkpoints reqs_until_checkpoints = reqs_for_checkpoint - other_replicas[0].last_ordered_3pc[1] - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP * reqs_until_checkpoints) looper.runFor(waits.expectedTransactionExecutionTime(nodeCount)) @@ -98,8 +98,8 @@ def test_backup_replica_resumes_ordering_on_lag_in_checkpoints( check_num_quorumed_received_checkpoints(slow_replica, 1) # Send more requests to reach catch-up number of checkpoints - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, reqs_for_checkpoint) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, reqs_for_checkpoint) batches_count += 1 batches_count += reqs_until_checkpoints batches_count += reqs_for_checkpoint @@ -130,7 +130,7 @@ def test_backup_replica_resumes_ordering_on_lag_in_checkpoints( check_num_quorumed_received_checkpoints(slow_replica, 0) # Send a request and ensure that the replica orders the batch for it - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1) batches_count += 1 looper.run( @@ -145,8 +145,8 @@ def test_backup_replica_resumes_ordering_on_lag_in_checkpoints( def test_backup_replica_resumes_ordering_on_lag_if_checkpoints_belate( looper, chkFreqPatched, reqs_for_checkpoint, - one_replica_and_others_in_backup_instance, - sdk_pool_handle, sdk_wallet_client, view_change_done, txnPoolNodeSet): + vdr_one_replica_and_others_in_backup_instance, + vdr_pool_handle, vdr_wallet_client, vdr_view_change_done, txnPoolNodeSet): """ Verifies resumption of ordering 3PC-batches on a backup replica on detection of a lag in checkpoints in case it is detected after @@ -156,14 +156,14 @@ def test_backup_replica_resumes_ordering_on_lag_if_checkpoints_belate( def check_last_ordered(replica, lo): assert replica.last_ordered_3pc == lo - slow_replica, other_replicas = one_replica_and_others_in_backup_instance + slow_replica, other_replicas = vdr_one_replica_and_others_in_backup_instance view_no = slow_replica.viewNo check_last_ordered_3pc_backup(slow_replica.node, other_replicas[0].node) batches_count = slow_replica.last_ordered_3pc[1] low_watermark = slow_replica.h # Send a request and ensure that the replica orders the batch for it - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1) batches_count += 1 looper.run( @@ -183,7 +183,7 @@ def check_last_ordered(replica, lo): # Send a request for which the replica will not be able to order the batch # due to an insufficient count of Commits - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1) looper.runFor(waits.expectedTransactionExecutionTime(nodeCount)) # Receive further Commits from now on @@ -197,7 +197,7 @@ def check_last_ordered(replica, lo): # Send requests but in a quantity insufficient # for catch-up number of checkpoints reqs_until_checkpoints = reqs_for_checkpoint - other_replicas[0].last_ordered_3pc[1] - sdk_send_random_requests(looper, 
sdk_pool_handle, sdk_wallet_client, + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP * reqs_until_checkpoints) looper.runFor(waits.expectedTransactionExecutionTime(nodeCount)) @@ -206,10 +206,10 @@ def check_last_ordered(replica, lo): slow_replica.node.nodeIbStasher.delay(chk_delay(instId=1)) # Send more requests to reach catch-up number of checkpoints - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, reqs_for_checkpoint) # Send a request that starts a new checkpoint - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1) looper.runFor(waits.expectedTransactionExecutionTime(nodeCount)) # Ensure that the replica has not ordered any batches @@ -244,7 +244,7 @@ def check_last_ordered(replica, lo): check_num_quorumed_received_checkpoints(slow_replica, 0) # Send a request and ensure that the replica orders the batch for it - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1) batches_count += 1 looper.run( diff --git a/plenum/test/checkpoints/test_basic_checkpointing.py b/plenum/test/checkpoints/test_basic_checkpointing.py index f4a67f83f1..ff1aa2ba98 100644 --- a/plenum/test/checkpoints/test_basic_checkpointing.py +++ b/plenum/test/checkpoints/test_basic_checkpointing.py @@ -1,25 +1,25 @@ from plenum.test.checkpoints.helper import check_for_nodes, check_stable_checkpoint, check_num_unstable_checkpoints from stp_core.loop.eventually import eventually from plenum.test import waits -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check # TODO: Probably these tests needs to be reworked, especially deletion test -def test_checkpoint_created(chkFreqPatched, tconf, looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, reqs_for_checkpoint): +def test_checkpoint_created(chkFreqPatched, tconf, looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, reqs_for_checkpoint): """ After requests less than `CHK_FREQ`, there should be one checkpoint on each replica. 
After `CHK_FREQ`, one checkpoint should become stable """ # Send one batch less so checkpoint is not created - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, reqs_for_checkpoint - (chkFreqPatched.Max3PCBatchSize)) # Deliberately waiting so as to verify that checkpoint is not created nor stabilized looper.run(eventually(check_for_nodes, txnPoolNodeSet, check_stable_checkpoint, 0)) looper.run(eventually(check_for_nodes, txnPoolNodeSet, check_num_unstable_checkpoints, 0)) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, chkFreqPatched.Max3PCBatchSize) next_checkpoint = tconf.CHK_FREQ @@ -29,15 +29,15 @@ def test_checkpoint_created(chkFreqPatched, tconf, looper, txnPoolNodeSet, sdk_p check_for_nodes(txnPoolNodeSet, check_num_unstable_checkpoints, 0) -def test_old_checkpoint_deleted(tconf, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_for_checkpoint): +def test_old_checkpoint_deleted(tconf, looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, reqs_for_checkpoint): """ Send requests more than twice of `CHK_FREQ`, there should be one new stable checkpoint on each replica. The old stable checkpoint should be removed """ next_checkpoint = txnPoolNodeSet[0].master_replica._consensus_data.stable_checkpoint + 2 * tconf.CHK_FREQ - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 2 * reqs_for_checkpoint) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 2 * reqs_for_checkpoint) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) timeout = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)) looper.run(eventually(check_for_nodes, txnPoolNodeSet, check_stable_checkpoint, next_checkpoint, diff --git a/plenum/test/checkpoints/test_checkpoint_bounds_after_catchup.py b/plenum/test/checkpoints/test_checkpoint_bounds_after_catchup.py index dc63125085..172be4a5e6 100644 --- a/plenum/test/checkpoints/test_checkpoint_bounds_after_catchup.py +++ b/plenum/test/checkpoints/test_checkpoint_bounds_after_catchup.py @@ -1,9 +1,9 @@ from plenum.test.delayers import cDelay from plenum.test.checkpoints.helper import check_stable_checkpoint -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality, ensure_all_nodes_have_same_data -from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node +from plenum.test.pool_transactions.helper import vdr_add_new_steward_and_node from plenum.test.stasher import delay_rules_without_processing from plenum.test.test_node import checkNodesConnected @@ -12,12 +12,12 @@ def test_upper_bound_of_checkpoint_after_catchup_is_divisible_by_chk_freq( chkFreqPatched, looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, sdk_wallet_client, tdir, + vdr_pool_handle, vdr_wallet_steward, vdr_wallet_client, tdir, tconf, allPluginsPath): lagging_node = txnPoolNodeSet[-1] with delay_rules_without_processing(lagging_node.nodeIbStasher, cDelay()): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, tconf.Max3PCBatchSize * CHK_FREQ * 2 + 1) + 
vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, tconf.Max3PCBatchSize * CHK_FREQ * 2 + 1) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) waitNodeDataEquality(looper, lagging_node, *txnPoolNodeSet[:-1], exclude_from_check=['check_last_ordered_3pc_backup']) @@ -25,8 +25,8 @@ def test_upper_bound_of_checkpoint_after_catchup_is_divisible_by_chk_freq( # NYM transaction and the batch with Epsilon NODE transaction. # Epsilon got these transactions via catch-up. - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, (CHK_FREQ - 1) * tconf.Max3PCBatchSize) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, (CHK_FREQ - 1) * tconf.Max3PCBatchSize) for replica in txnPoolNodeSet[0].replicas.values(): check_stable_checkpoint(replica, CHK_FREQ * 3) diff --git a/plenum/test/checkpoints/test_checkpoint_stabilization_after_catchup.py b/plenum/test/checkpoints/test_checkpoint_stabilization_after_catchup.py index 2dd11bddd2..a0ffb37f9d 100644 --- a/plenum/test/checkpoints/test_checkpoint_stabilization_after_catchup.py +++ b/plenum/test/checkpoints/test_checkpoint_stabilization_after_catchup.py @@ -3,7 +3,7 @@ from plenum.test import waits from plenum.test.checkpoints.helper import check_stable_checkpoint, check_num_received_checkpoints, \ check_num_unstable_checkpoints -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.stasher import delay_rules_without_processing CHK_FREQ = 5 @@ -11,17 +11,17 @@ def test_second_checkpoint_after_catchup_can_be_stabilized( - chkFreqPatched, looper, txnPoolNodeSet, sdk_wallet_steward, - sdk_wallet_client, sdk_pool_handle, tdir, tconf, + chkFreqPatched, looper, txnPoolNodeSet, vdr_wallet_steward, + vdr_wallet_client, vdr_pool_handle, tdir, tconf, allPluginsPath): lagging_node = txnPoolNodeSet[-1] with delay_rules_without_processing(lagging_node.nodeIbStasher, cDelay()): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, tconf.Max3PCBatchSize * CHK_FREQ * 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, tconf.Max3PCBatchSize * CHK_FREQ * 2) waitNodeDataEquality(looper, lagging_node, *txnPoolNodeSet[:-1]) # Epsilon got lost transactions via catch-up. 
- sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 2) master_replica = lagging_node.master_replica @@ -31,15 +31,15 @@ def test_second_checkpoint_after_catchup_can_be_stabilized( assert master_replica.h == 10 assert master_replica.H == 25 - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) for replica in lagging_node.replicas.values(): assert replica.h == 10 assert replica.H == 25 - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 6) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 6) stabilization_timeout = \ waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)) looper.runFor(stabilization_timeout) @@ -54,8 +54,8 @@ def test_second_checkpoint_after_catchup_can_be_stabilized( assert replica.h == 15 assert replica.H == 30 - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) looper.runFor(stabilization_timeout) for replica in lagging_node.replicas.values(): diff --git a/plenum/test/checkpoints/test_checkpoint_stable_while_unstashing.py b/plenum/test/checkpoints/test_checkpoint_stable_while_unstashing.py index 6289fec24e..66c26e4b98 100644 --- a/plenum/test/checkpoints/test_checkpoint_stable_while_unstashing.py +++ b/plenum/test/checkpoints/test_checkpoint_stable_while_unstashing.py @@ -1,6 +1,6 @@ from plenum.test.checkpoints.helper import check_for_nodes, check_stable_checkpoint, check_received_checkpoint_votes from plenum.test.delayers import ppDelay, msg_rep_delay -from plenum.test.helper import sdk_send_random_and_check, assertExp +from plenum.test.helper import vdr_send_random_and_check, assertExp from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.stasher import delay_rules from stp_core.loop.eventually import eventually @@ -13,22 +13,22 @@ def test_stabilize_checkpoint_while_unstashing_when_missing_pre_prepare(looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): # Prepare nodes lagging_node = txnPoolNodeSet[-1] lagging_master_replcia = lagging_node.master_replica rest_nodes = txnPoolNodeSet[:-1] # 1. send enough requests so that just 1 is left for checkpoint stabilization - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, reqs_for_checkpoint - 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, reqs_for_checkpoint - 1) # 2. 
delay PrePrepare on 1 node so that prepares and commits will be stashed with delay_rules(lagging_node.nodeIbStasher, ppDelay()): with delay_rules(lagging_node.nodeIbStasher, msg_rep_delay()): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) # all good nodes stabilized checkpoint looper.run(eventually(check_for_nodes, rest_nodes, check_stable_checkpoint, 5)) diff --git a/plenum/test/checkpoints/test_checkpoints_removal_in_view_change.py b/plenum/test/checkpoints/test_checkpoints_removal_in_view_change.py index 5865a69008..ecd96be544 100644 --- a/plenum/test/checkpoints/test_checkpoints_removal_in_view_change.py +++ b/plenum/test/checkpoints/test_checkpoints_removal_in_view_change.py @@ -3,8 +3,8 @@ from plenum.common.constants import CHECKPOINT, COMMIT from plenum.test.delayers import cDelay, chk_delay -from plenum.test.helper import sdk_send_random_requests, \ - sdk_get_and_check_replies, sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_requests, \ + vdr_get_and_check_replies, vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change.helper import ensure_view_change @@ -17,8 +17,8 @@ def test_checkpoints_removed_in_view_change(chkFreqPatched, txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): ''' Check that checkpoint finalize in view change before catchup doesn't clean necessary data from requests and 3pc queues. @@ -28,14 +28,14 @@ def test_checkpoints_removed_in_view_change(chkFreqPatched, # delay checkpoints processing for slow_nodes delay_msg(slow_nodes, chk_delay) # send txns for finalizing current checkpoint - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, CHK_FREQ) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, CHK_FREQ) ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) # delay commits processing for slow_nodes delay_msg(slow_nodes, cDelay) - requests = sdk_send_random_requests(looper, sdk_pool_handle, - sdk_wallet_client, 1) + requests = vdr_send_random_requests(looper, vdr_pool_handle, + vdr_wallet_client, 1) # check that slow nodes have prepared certificate with new txn looper.run(eventually(last_prepared_certificate, slow_nodes, @@ -71,7 +71,7 @@ def test_checkpoints_removed_in_view_change(chkFreqPatched, # because slow_nodes contains 3 nodes and without their replies sdk method # for get reply will not successfully finish. 
reset_delay(slow_nodes, COMMIT) - sdk_get_and_check_replies(looper, requests) + vdr_get_and_check_replies(looper, requests) looper.run(eventually(last_ordered_check, txnPoolNodeSet, (0, CHK_FREQ + 1))) @@ -80,8 +80,8 @@ def test_checkpoints_removed_in_view_change(chkFreqPatched, for n in slow_nodes: assert (1, CHK_FREQ) not in n.master_replica._checkpointer._checkpoint_state # check that all nodes have same data after new txns ordering - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, CHK_FREQ) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, CHK_FREQ) ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) diff --git a/plenum/test/checkpoints/test_complete_short_checkpoint_not_included_in_lag_for_catchup.py b/plenum/test/checkpoints/test_complete_short_checkpoint_not_included_in_lag_for_catchup.py index 4a9d7f4a8c..11bf4e598b 100644 --- a/plenum/test/checkpoints/test_complete_short_checkpoint_not_included_in_lag_for_catchup.py +++ b/plenum/test/checkpoints/test_complete_short_checkpoint_not_included_in_lag_for_catchup.py @@ -9,7 +9,7 @@ from plenum.test.helper import send_reqs_batches_and_get_suff_replies from plenum.test.node_catchup.helper import waitNodeDataEquality, \ checkNodeDataForInequality, waitNodeDataInequality, get_number_of_completed_catchups -from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node +from plenum.test.pool_transactions.helper import vdr_add_new_steward_and_node from plenum.test.test_node import checkNodesConnected, ensureElectionsDone logger = getLogger() @@ -25,7 +25,7 @@ def test_complete_short_checkpoint_not_included_in_lag_for_catchup( looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_steward, vdr_wallet_client, tdir, tconf, allPluginsPath): """ Verifies that if the first stored own checkpoint has a not aligned lower @@ -38,8 +38,8 @@ def test_complete_short_checkpoint_not_included_in_lag_for_catchup( """ max_batch_size = chkFreqPatched.Max3PCBatchSize - _, new_node = sdk_add_new_steward_and_node( - looper, sdk_pool_handle, sdk_wallet_steward, + _, new_node = vdr_add_new_steward_and_node( + looper, vdr_pool_handle, vdr_wallet_steward, 'EpsilonSteward', 'Epsilon', tdir, tconf, allPluginsPath=allPluginsPath) txnPoolNodeSet.append(new_node) @@ -56,8 +56,8 @@ def test_complete_short_checkpoint_not_included_in_lag_for_catchup( # transaction). This checkpoint has a not aligned lower bound # on the new node replicas so it will not be stabilized on them. 
send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, reqs_for_checkpoint - 2 * max_batch_size) waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1], exclude_from_check=['check_last_ordered_3pc_backup']) @@ -73,8 +73,8 @@ def test_complete_short_checkpoint_not_included_in_lag_for_catchup( # Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 quorumed stashed # checkpoints from others send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP * reqs_for_checkpoint) @@ -91,8 +91,8 @@ def test_complete_short_checkpoint_not_included_in_lag_for_catchup( # Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 2 quorumed stashed # checkpoints from others send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, reqs_for_checkpoint) waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1], exclude_from_check=['check_last_ordered_3pc_backup']) diff --git a/plenum/test/checkpoints/test_discard_old_checkpoint_messages.py b/plenum/test/checkpoints/test_discard_old_checkpoint_messages.py index e5e0526eec..d60b5cdd35 100644 --- a/plenum/test/checkpoints/test_discard_old_checkpoint_messages.py +++ b/plenum/test/checkpoints/test_discard_old_checkpoint_messages.py @@ -4,17 +4,17 @@ from plenum.test.checkpoints.helper import check_for_instance, check_stable_checkpoint from stp_core.loop.eventually import eventually from plenum.test.helper import checkDiscardMsg -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check def test_discard_checkpoint_msg_for_stable_checkpoint(chkFreqPatched, tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, reqs_for_checkpoint): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, reqs_for_checkpoint) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, reqs_for_checkpoint) next_checkpoint = tconf.CHK_FREQ for inst_id in txnPoolNodeSet[0].replicas.keys(): looper.run(eventually(check_for_instance, txnPoolNodeSet, inst_id, diff --git a/plenum/test/checkpoints/test_incomplete_short_checkpoint_included_in_lag_for_catchup.py b/plenum/test/checkpoints/test_incomplete_short_checkpoint_included_in_lag_for_catchup.py index 260ff7da03..0aad158372 100644 --- a/plenum/test/checkpoints/test_incomplete_short_checkpoint_included_in_lag_for_catchup.py +++ b/plenum/test/checkpoints/test_incomplete_short_checkpoint_included_in_lag_for_catchup.py @@ -9,7 +9,7 @@ from plenum.test.helper import send_reqs_batches_and_get_suff_replies from plenum.test.node_catchup.helper import waitNodeDataEquality, \ checkNodeDataForInequality, get_number_of_completed_catchups -from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node +from plenum.test.pool_transactions.helper import vdr_add_new_steward_and_node from plenum.test.test_node import checkNodesConnected logger = getLogger() @@ -23,7 +23,7 @@ def test_incomplete_short_checkpoint_included_in_lag_for_catchup( looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_steward, vdr_wallet_client, tdir, tconf, allPluginsPath): """ Verifies that if the first stored own 
checkpoint has a not aligned lower @@ -36,8 +36,8 @@ def test_incomplete_short_checkpoint_included_in_lag_for_catchup( """ max_batch_size = chkFreqPatched.Max3PCBatchSize - _, new_node = sdk_add_new_steward_and_node( - looper, sdk_pool_handle, sdk_wallet_steward, + _, new_node = vdr_add_new_steward_and_node( + looper, vdr_pool_handle, vdr_wallet_steward, 'EpsilonSteward', 'Epsilon', tdir, tconf, allPluginsPath=allPluginsPath) txnPoolNodeSet.append(new_node) @@ -52,8 +52,8 @@ def test_incomplete_short_checkpoint_included_in_lag_for_catchup( # with EpsilonSteward NYM transaction and with Epsilon NODE transaction. # This checkpoint has a not aligned lower bound on the new node replicas. send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, reqs_for_checkpoint - 4 * max_batch_size) # The master replica of the new node stops to receive 3PC-messages @@ -67,8 +67,8 @@ def test_incomplete_short_checkpoint_included_in_lag_for_catchup( # Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP quorumed stashed # checkpoints from others send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP - 1) * reqs_for_checkpoint + max_batch_size) @@ -85,8 +85,8 @@ def test_incomplete_short_checkpoint_included_in_lag_for_catchup( # Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 quorumed stashed # checkpoints from others send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, reqs_for_checkpoint) waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1], exclude_from_check=['check_last_ordered_3pc_backup']) diff --git a/plenum/test/checkpoints/test_lag_size_for_catchup.py b/plenum/test/checkpoints/test_lag_size_for_catchup.py index a26bd63bf8..d251a6870d 100644 --- a/plenum/test/checkpoints/test_lag_size_for_catchup.py +++ b/plenum/test/checkpoints/test_lag_size_for_catchup.py @@ -20,7 +20,7 @@ def test_lag_size_for_catchup( looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): """ Verifies that if the stored own checkpoints have aligned bounds then the master replica lag which makes the node perform catch-up is @@ -41,8 +41,8 @@ def test_lag_size_for_catchup( # Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP quorumed stashed checkpoints # from others send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP * reqs_for_checkpoint) @@ -59,8 +59,8 @@ def test_lag_size_for_catchup( # Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 quorumed stashed # checkpoints from others send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, reqs_for_checkpoint) waitNodeDataEquality(looper, slow_node, *other_nodes, exclude_from_check=['check_last_ordered_3pc_backup']) diff --git a/plenum/test/checkpoints/test_lagged_checkpoint_completion.py b/plenum/test/checkpoints/test_lagged_checkpoint_completion.py index 7fa201a86f..db03ed45ac 100644 --- a/plenum/test/checkpoints/test_lagged_checkpoint_completion.py +++ b/plenum/test/checkpoints/test_lagged_checkpoint_completion.py @@ -2,14 +2,14 @@ from plenum.test.checkpoints.helper import 
check_num_received_checkpoints, \ check_received_checkpoint_votes, check_stable_checkpoint, check_num_unstable_checkpoints from plenum.test.delayers import cDelay -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from stp_core.loop.eventually import eventually CHK_FREQ = 5 def test_lagged_checkpoint_completion(chkFreqPatched, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): """ One node in a pool lags to order the last 3PC-batch in a checkpoint so that when it eventually orders this 3PC-batch and thus completes the checkpoint @@ -23,13 +23,13 @@ def test_lagged_checkpoint_completion(chkFreqPatched, looper, txnPoolNodeSet, # checkpoint except the last 3PC-batch. The last 3PC-batch in the # checkpoint is ordered by all the nodes except one slow node because this # node lags to receive Commits. - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 4) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 4) slow_node.nodeIbStasher.delay(cDelay()) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) # All the other nodes complete the checkpoint and send Checkpoint messages # to others. The slow node receives and stashes these messages because it diff --git a/plenum/test/checkpoints/test_message_outside_watermark.py b/plenum/test/checkpoints/test_message_outside_watermark.py index a8df88b3da..2b6b2cdc24 100644 --- a/plenum/test/checkpoints/test_message_outside_watermark.py +++ b/plenum/test/checkpoints/test_message_outside_watermark.py @@ -7,7 +7,7 @@ from plenum.test.node_catchup.helper import waitNodeDataEquality, \ checkNodeDataForInequality from plenum.test.test_node import getNonPrimaryReplicas, TestReplica -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check CHK_FREQ = 5 LOG_SIZE = 3 * CHK_FREQ @@ -25,8 +25,8 @@ def test_non_primary_recvs_3phase_message_outside_watermarks( reqs_for_logsize, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): """ A node is slow in receiving PRE-PREPAREs and PREPAREs. A lot of requests are sent and the slow node has started receiving COMMITs outside of its @@ -59,7 +59,7 @@ def test_non_primary_recvs_3phase_message_outside_watermarks( oldStashCount = slowReplica.stasher.stash_size(STASH_WATERMARKS) slowReplica._checkpointer.set_watermarks(slowReplica.h, LOG_SIZE) # 1. 
Send requests more than fit between the watermarks on the slow node - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_for_logsize + 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, reqs_for_logsize + 2) # Verify that the slow node stashes the batches outside of its watermarks newStashCount = slowReplica.stasher.stash_size(STASH_WATERMARKS) diff --git a/plenum/test/checkpoints/test_message_outside_watermark1.py b/plenum/test/checkpoints/test_message_outside_watermark1.py index 3da92f2856..7b1ba79c6b 100644 --- a/plenum/test/checkpoints/test_message_outside_watermark1.py +++ b/plenum/test/checkpoints/test_message_outside_watermark1.py @@ -7,7 +7,7 @@ from plenum.test.delayers import ppDelay, pDelay from plenum.test.test_node import getNonPrimaryReplicas, getPrimaryReplica from plenum.test.view_change.conftest import perf_chk_patched -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check TestRunningTimeLimitSec = 300 PerfCheckFreq = 30 @@ -18,7 +18,7 @@ def test_primary_recvs_3phase_message_outside_watermarks(perf_chk_patched, chkFreqPatched, looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, reqs_for_logsize): + vdr_pool_handle, vdr_wallet_client, reqs_for_logsize): """ One of the primary starts getting lot of requests, more than his log size and queues up requests since they will go beyond its watermarks. This @@ -47,5 +47,5 @@ def test_primary_recvs_3phase_message_outside_watermarks(perf_chk_patched, chkFr def chk(): assert orderedCount + batch_count == pr._ordering_service.spylog.count(pr._ordering_service._order_3pc_key) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_to_send) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, reqs_to_send) looper.run(eventually(chk, retryWait=1, timeout=total_timeout)) diff --git a/plenum/test/checkpoints/test_ordering_after_catchup.py b/plenum/test/checkpoints/test_ordering_after_catchup.py index 372a796c87..df2142bd25 100644 --- a/plenum/test/checkpoints/test_ordering_after_catchup.py +++ b/plenum/test/checkpoints/test_ordering_after_catchup.py @@ -1,9 +1,9 @@ -from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check +from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality, \ ensure_all_nodes_have_same_data from plenum.common.util import randomString from plenum.test.test_node import checkNodesConnected -from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node +from plenum.test.pool_transactions.helper import vdr_add_new_steward_and_node CHK_FREQ = 6 LOG_SIZE = 3 * CHK_FREQ @@ -17,7 +17,7 @@ def add_new_node(looper, pool_nodes, sdk_pool_handle, sdk_wallet_steward, node_name = "Node-" + name new_steward_name = "Steward-" + name - _, new_node = sdk_add_new_steward_and_node( + _, new_node = vdr_add_new_steward_and_node( looper, sdk_pool_handle, sdk_wallet_steward, new_steward_name, node_name, tdir, tconf, allPluginsPath=all_plugins_path) @@ -32,7 +32,7 @@ def add_new_node(looper, pool_nodes, sdk_pool_handle, sdk_wallet_steward, def test_ordering_after_more_than_f_nodes_caught_up( - chkFreqPatched, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, + chkFreqPatched, looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, tdir, tconf, allPluginsPath): """ Verifies that more than 
LOG_SIZE batches can be ordered in one view @@ -42,11 +42,11 @@ def test_ordering_after_more_than_f_nodes_caught_up( initial_view_no = txnPoolNodeSet[0].viewNo for _ in range(2): - add_new_node(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, + add_new_node(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, tdir, tconf, allPluginsPath) checkViewNoForNodes(txnPoolNodeSet, initial_view_no) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 20) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 20) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, exclude_from_check=['check_last_ordered_3pc_backup']) checkViewNoForNodes(txnPoolNodeSet, initial_view_no) diff --git a/plenum/test/checkpoints/test_stable_checkpoint.py b/plenum/test/checkpoints/test_stable_checkpoint.py index 9d2f0a66ef..5a7dea9284 100644 --- a/plenum/test/checkpoints/test_stable_checkpoint.py +++ b/plenum/test/checkpoints/test_stable_checkpoint.py @@ -3,27 +3,27 @@ from plenum.common.exceptions import RequestRejectedException from plenum.test import waits from plenum.test.checkpoints.helper import checkRequestCounts, check_for_nodes, check_stable_checkpoint -from plenum.test.pool_transactions.helper import sdk_add_new_nym +from plenum.test.pool_transactions.helper import vdr_add_new_nym from stp_core.loop.eventually import eventually -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check CHK_FREQ = 5 -def test_request_older_than_stable_checkpoint_removed(chkFreqPatched, looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, reqs_for_checkpoint): +def test_request_older_than_stable_checkpoint_removed(chkFreqPatched, looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, reqs_for_checkpoint): timeout = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)) max_batch_size = chkFreqPatched.Max3PCBatchSize # Send some requests (insufficient for checkpoint), # wait replies and check that current checkpoint is not stable - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 2 * max_batch_size) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, 2 * max_batch_size) looper.run(eventually(check_for_nodes, txnPoolNodeSet, check_stable_checkpoint, 0, retryWait=1, timeout=timeout)) checkRequestCounts(txnPoolNodeSet, 2 * max_batch_size, 2) # From the steward send a request creating a user with None role - sdk_wallet_user = sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_steward) + sdk_wallet_user = vdr_add_new_nym(looper, vdr_pool_handle, vdr_wallet_steward) looper.run(eventually(check_for_nodes, txnPoolNodeSet, check_stable_checkpoint, 0, retryWait=1, timeout=timeout)) checkRequestCounts(txnPoolNodeSet, 2 * max_batch_size + 1, 3) @@ -31,18 +31,18 @@ def test_request_older_than_stable_checkpoint_removed(chkFreqPatched, looper, tx # Dynamic validation of this request must fail since a user with None role cannot create users. # However, the 3PC-batch with the sent request must be ordered. 
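Editor's note: sdk_add_new_nym is replaced by vdr_add_new_nym here and again in the conftest fixtures further down. The helper itself is outside this diff; for orientation, a NYM write through indy-vdr roughly amounts to building a request with indy_vdr.ledger.build_nym_request, signing its signature_input with the submitter's Ed25519 key, and submitting it on the pool handle. The sketch below makes those steps explicit; the seed-based signing is an assumption about how the project's test wallet holds keys (PyNaCl is used purely for illustration):

# Rough sketch, not the project's implementation: only build_nym_request,
# signature_input, set_signature and submit_request are real indy-vdr calls.
from indy_vdr import ledger
from nacl.signing import SigningKey


async def add_new_nym_sketch(pool, submitter_seed: bytes, submitter_did: str,
                             new_did: str, new_verkey: str, role=None):
    # Build the NYM transaction for the domain ledger.
    req = ledger.build_nym_request(submitter_did, new_did,
                                   verkey=new_verkey, role=role)
    # Sign the canonical signature input with the submitter's Ed25519 key
    # (assumed here to be derived from a 32-byte seed kept by the test wallet).
    signature = SigningKey(submitter_seed).sign(req.signature_input).signature
    req.set_signature(signature)
    # Submit and wait for the ledger reply.
    return await pool.submit_request(req)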
with pytest.raises(RequestRejectedException): - sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_user) + vdr_add_new_nym(looper, vdr_pool_handle, sdk_wallet_user) looper.run(eventually(check_for_nodes, txnPoolNodeSet, check_stable_checkpoint, 0, retryWait=1, timeout=timeout)) checkRequestCounts(txnPoolNodeSet, 2 * max_batch_size + 2, 4) # Send more requests to cause checkpoint stabilization - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, max_batch_size) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, max_batch_size) # Check that checkpoint is stable now # and verify that requests for it were removed looper.run(eventually(check_for_nodes, txnPoolNodeSet, check_stable_checkpoint, 5, retryWait=1, timeout=timeout)) checkRequestCounts(txnPoolNodeSet, 0, 0) # Send more requests to cause new checkpoint - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, reqs_for_checkpoint + 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, reqs_for_checkpoint + 1) looper.run(eventually(check_for_nodes, txnPoolNodeSet, check_stable_checkpoint, 10, retryWait=1, timeout=timeout)) checkRequestCounts(txnPoolNodeSet, 1, 1) diff --git a/plenum/test/checkpoints/test_stable_checkpoint1.py b/plenum/test/checkpoints/test_stable_checkpoint1.py index 76fe649616..aaf958711b 100644 --- a/plenum/test/checkpoints/test_stable_checkpoint1.py +++ b/plenum/test/checkpoints/test_stable_checkpoint1.py @@ -4,11 +4,11 @@ from plenum.test import waits from plenum.test.delayers import ppDelay from plenum.test.test_node import getPrimaryReplica -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check -def test_stable_checkpoint_when_one_instance_slow(chkFreqPatched, tconf, looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, reqs_for_checkpoint): +def test_stable_checkpoint_when_one_instance_slow(chkFreqPatched, tconf, looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, reqs_for_checkpoint): delay = 5 pr = getPrimaryReplica(txnPoolNodeSet, 1) slowNode = pr.node @@ -16,7 +16,7 @@ def test_stable_checkpoint_when_one_instance_slow(chkFreqPatched, tconf, looper, for n in otherNodes: n.nodeIbStasher.delay(ppDelay(delay, 1)) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_for_checkpoint) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, reqs_for_checkpoint) timeout = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)) + delay next_checkpoint = tconf.CHK_FREQ looper.run(eventually(check_for_nodes, txnPoolNodeSet, check_stable_checkpoint, next_checkpoint, diff --git a/plenum/test/checkpoints/test_stashed_checkpoint_processing.py b/plenum/test/checkpoints/test_stashed_checkpoint_processing.py index 9097972c6e..c7c54413cb 100644 --- a/plenum/test/checkpoints/test_stashed_checkpoint_processing.py +++ b/plenum/test/checkpoints/test_stashed_checkpoint_processing.py @@ -4,7 +4,7 @@ check_last_received_checkpoint, check_received_checkpoint_votes, check_stable_checkpoint, \ check_num_unstable_checkpoints from plenum.test.delayers import cDelay, chk_delay -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from stp_core.loop.eventually import eventually CHK_FREQ = 5 @@ -13,7 +13,7 @@ def test_stashed_checkpoint_processing(chkFreqPatched, looper, txnPoolNodeSet, - 
sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): """ One node in a pool of 5 nodes lags to order the last 3PC-batch in a checkpoint. By the moment when it eventually orders the 3PC-batch it has @@ -24,15 +24,15 @@ def test_stashed_checkpoint_processing(chkFreqPatched, looper, txnPoolNodeSet, """ epsilon = txnPoolNodeSet[-1] - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 4) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 4) epsilon.nodeIbStasher.delay(cDelay()) epsilon.nodeIbStasher.delay(chk_delay(sender_filter='Gamma')) epsilon.nodeIbStasher.delay(chk_delay(sender_filter='Delta')) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) stabilization_timeout = \ waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)) diff --git a/plenum/test/checkpoints/test_stashed_messages_processed_on_backup_replica_ordering_resumption.py b/plenum/test/checkpoints/test_stashed_messages_processed_on_backup_replica_ordering_resumption.py index e98a2f2118..136025b01b 100644 --- a/plenum/test/checkpoints/test_stashed_messages_processed_on_backup_replica_ordering_resumption.py +++ b/plenum/test/checkpoints/test_stashed_messages_processed_on_backup_replica_ordering_resumption.py @@ -7,7 +7,7 @@ from plenum.test import waits from plenum.test.checkpoints.helper import check_num_quorumed_received_checkpoints from plenum.test.delayers import cDelay, chk_delay, msg_rep_delay -from plenum.test.helper import sdk_send_random_requests, assertExp, incoming_3pc_msgs_count, get_pp_seq_no +from plenum.test.helper import vdr_send_random_requests, assertExp, incoming_3pc_msgs_count, get_pp_seq_no from stp_core.loop.eventually import eventually nodeCount = 4 @@ -32,8 +32,8 @@ def tconf(tconf): def test_stashed_messages_processed_on_backup_replica_ordering_resumption( looper, chkFreqPatched, reqs_for_checkpoint, - one_replica_and_others_in_backup_instance, - sdk_pool_handle, sdk_wallet_client, view_change_done, + vdr_one_replica_and_others_in_backup_instance, + vdr_pool_handle, vdr_wallet_client, vdr_view_change_done, txnPoolNodeSet): """ Verifies resumption of ordering 3PC-batches on a backup replica @@ -43,12 +43,12 @@ def test_stashed_messages_processed_on_backup_replica_ordering_resumption( Please note that to verify this case the config is set up so that LOG_SIZE == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ """ - slow_replica, other_replicas = one_replica_and_others_in_backup_instance + slow_replica, other_replicas = vdr_one_replica_and_others_in_backup_instance view_no = slow_replica.viewNo batches_count = other_replicas[0].last_ordered_3pc[1] # Send a request and ensure that the replica orders the batch for it - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1) batches_count += 1 looper.run( @@ -68,7 +68,7 @@ def test_stashed_messages_processed_on_backup_replica_ordering_resumption( # Send a request for which the replica will not be able to order the batch # due to an insufficient count of Commits - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1) looper.runFor(waits.expectedTransactionExecutionTime(nodeCount)) # Receive further Commits from now on @@ -78,7 +78,7 @@ def 
test_stashed_messages_processed_on_backup_replica_ordering_resumption( # Send requests but in a quantity insufficient # for catch-up number of checkpoints reqs_until_checkpoints = reqs_for_checkpoint - other_replicas[0].last_ordered_3pc[1] - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP * reqs_until_checkpoints) looper.runFor(waits.expectedTransactionExecutionTime(nodeCount)) @@ -87,7 +87,7 @@ def test_stashed_messages_processed_on_backup_replica_ordering_resumption( slow_replica.node.nodeIbStasher.delay(chk_delay(instId=1)) # Send more requests to reach catch-up number of checkpoints - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, reqs_for_checkpoint) looper.runFor(waits.expectedTransactionExecutionTime(nodeCount)) @@ -96,7 +96,7 @@ def test_stashed_messages_processed_on_backup_replica_ordering_resumption( assert slow_replica.stasher.stash_size(STASH_WATERMARKS) == 0 # Send a request for which the batch will be outside of the watermarks - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1) looper.runFor(waits.expectedTransactionExecutionTime(nodeCount)) # Ensure that the replica has not ordered any batches @@ -139,7 +139,7 @@ def test_stashed_messages_processed_on_backup_replica_ordering_resumption( assert slow_replica.stasher.stash_size(STASH_WATERMARKS) == 0 # Send a request and ensure that the replica orders the batch for it - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1) batches_count += 1 looper.run( diff --git a/plenum/test/checkpoints/test_view_change_after_checkpoint.py b/plenum/test/checkpoints/test_view_change_after_checkpoint.py index b65d7adf4d..c9e868bc41 100644 --- a/plenum/test/checkpoints/test_view_change_after_checkpoint.py +++ b/plenum/test/checkpoints/test_view_change_after_checkpoint.py @@ -5,7 +5,7 @@ from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change.helper import ensure_view_change from stp_core.loop.eventually import eventually -from plenum.test.helper import sdk_send_batches_of_random_and_check, get_pp_seq_no +from plenum.test.helper import vdr_send_batches_of_random_and_check, get_pp_seq_no CHK_FREQ = 5 @@ -28,7 +28,7 @@ def sent_batches(request, chkFreqPatched): @pytest.mark.skip(reason="INDY-1336. For now, preprepares, prepares and commits queues are cleaned after view change") def test_checkpoint_across_views(sent_batches, chkFreqPatched, looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): """ Test checkpointing across views. 
This test checks that checkpointing and garbage collection works correctly @@ -40,7 +40,7 @@ def test_checkpoint_across_views(sent_batches, chkFreqPatched, looper, txnPoolNo low_watermark = txnPoolNodeSet[0].master_replica.h batch_size = chkFreqPatched.Max3PCBatchSize - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, batch_size * sent_batches, sent_batches) batches_count += sent_batches @@ -81,7 +81,7 @@ def test_checkpoint_across_views(sent_batches, chkFreqPatched, looper, txnPoolNo additional_after_vc = 1 # Even after view change, chekpointing works - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, batch_size * sent_batches, sent_batches) batches_count += sent_batches @@ -92,7 +92,7 @@ def test_checkpoint_across_views(sent_batches, chkFreqPatched, looper, txnPoolNo # when this test finishes, all requests are garbage collected and the # next run of this test (with next param) has the calculations correct more = CHK_FREQ - expected_batch_count - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, batch_size * more, more) batches_count += more looper.run(eventually(checkRequestCounts, txnPoolNodeSet, 0, 0, retryWait=1)) diff --git a/plenum/test/checkpoints/test_watermarks_on_delayed_backup.py b/plenum/test/checkpoints/test_watermarks_on_delayed_backup.py index 177afc0235..5f7a6bb545 100644 --- a/plenum/test/checkpoints/test_watermarks_on_delayed_backup.py +++ b/plenum/test/checkpoints/test_watermarks_on_delayed_backup.py @@ -4,7 +4,7 @@ from plenum.common.messages.node_messages import PrePrepare from plenum.common.stashing_router import DISCARD -from plenum.test.helper import sdk_send_batches_of_random_and_check +from plenum.test.helper import vdr_send_batches_of_random_and_check from plenum.test.test_node import getNonPrimaryReplicas logger = getLogger() @@ -19,7 +19,7 @@ def test_watermarks_restored_after_stable( looper, chkFreqPatched, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): """ A backup replica doesn't participate in consensus, and hence doesn't update watermarks. Then if it gets a quorum of stashed checkpoints (in fact Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 checkpoints @@ -35,7 +35,7 @@ def test_watermarks_restored_after_stable( # 2. send the number of requests which is less than Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 # quorumed checkpoints, # but sufficient for one watermark change (on a non-broken replica). - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, num_reqs=1 * 9, num_batches=9) assert broken_replica.last_ordered_3pc == (0, 0) assert broken_replica.h == 0 @@ -47,7 +47,7 @@ def test_watermarks_restored_after_stable( # 3. send requests to reach Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 # quorumed checkpoints. # The broken replica should adjust last_ordered_3pc and shift watermarks. 
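Editor's note: vdr_send_batches_of_random_and_check keeps the sdk helper's num_reqs/num_batches contract, where the requests are split so that each batch is ordered as its own 3PC batch, which is what lets this test count watermark moves deterministically. A minimal sketch of that contract, assuming it is layered on vdr_send_random_and_check (the real helper in plenum/test/helper.py may additionally tune Max3PCBatchSize):

# Illustrative composition only; see plenum/test/helper.py for the real helper.
from plenum.test.helper import vdr_send_random_and_check


def vdr_send_batches_of_random_and_check_sketch(looper, nodes, pool_handle,
                                                wallet, num_reqs, num_batches):
    reqs_per_batch = num_reqs // num_batches
    replies = []
    for _ in range(num_batches):
        # Each call is sent and fully ordered before the next one starts,
        # so every call lands in at least one separate 3PC batch.
        replies += vdr_send_random_and_check(looper, nodes, pool_handle,
                                             wallet, reqs_per_batch)
    return replies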
- sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, num_reqs=1, num_batches=1) assert broken_replica.last_ordered_3pc == (0, 10) assert broken_replica.h == 10 @@ -59,7 +59,7 @@ def test_watermarks_restored_after_stable( # 4. Repair broken replica and make sure that it participates in consensus # (after watermarks were corrected). repair_broken_replica(broken_replica) - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, num_reqs=7, num_batches=7) assert broken_replica.last_ordered_3pc == (0, 17) assert broken_replica.h == 15 diff --git a/plenum/test/client/test_client.py b/plenum/test/client/test_client.py index 8c2d56f635..8ac3ae6236 100644 --- a/plenum/test/client/test_client.py +++ b/plenum/test/client/test_client.py @@ -10,9 +10,9 @@ from plenum.server.node import Node from plenum.test import waits from plenum.test.helper import \ - checkLastClientReqForNode, sdk_signed_random_requests, \ - sdk_send_signed_requests, sdk_json_to_request_object, \ - sdk_get_and_check_replies, sdk_send_random_request + checkLastClientReqForNode, vdr_signed_random_requests, \ + vdr_send_signed_requests, vdr_json_to_request_object, \ + vdr_get_and_check_replies, vdr_send_random_request nodeCount = 7 @@ -23,19 +23,19 @@ # noinspection PyIncorrectDocstring def testSendRequestWithoutSignatureFails(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): """ A client request sent without a signature fails with an EmptySignature exception """ # remove the client's ability to sign - requests = sdk_signed_random_requests(looper, sdk_wallet_client, 1) + requests = vdr_signed_random_requests(looper, vdr_wallet_client, 1) json_req = json.loads(requests[0]) json_req['signature'] = None request = json.dumps(json_req) - res = sdk_send_signed_requests(sdk_pool_handle, [request]) - obj_req = sdk_json_to_request_object(res[0][0]) + res = vdr_send_signed_requests(vdr_pool_handle, [request], looper) + obj_req = vdr_json_to_request_object(res[0][0]) timeout = waits.expectedClientRequestPropagationTime(nodeCount) @@ -61,13 +61,13 @@ def testSendRequestWithoutSignatureFails(looper, txnPoolNodeSet, # noinspection PyIncorrectDocstring -def testReplyWhenRequestAlreadyExecuted(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, sent1): +def testReplyWhenRequestAlreadyExecuted(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, sent1): """ When a request has already been executed the previously executed reply will be sent again to the client. An acknowledgement will not be sent for a repeated request. 
""" - sdk_get_and_check_replies(looper, sent1) - req = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) - sdk_get_and_check_replies(looper, [req]) + vdr_get_and_check_replies(looper, sent1) + req = vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_client) + vdr_get_and_check_replies(looper, [req]) diff --git a/plenum/test/client/test_client_send_wrong_ledger_status.py b/plenum/test/client/test_client_send_wrong_ledger_status.py index c08951d105..c498845d37 100644 --- a/plenum/test/client/test_client_send_wrong_ledger_status.py +++ b/plenum/test/client/test_client_send_wrong_ledger_status.py @@ -5,17 +5,17 @@ @pytest.fixture() -def client(test_node): - sock = create_zmq_connection(test_node, zmq.DEALER) +def client(vdr_test_node): + sock = create_zmq_connection(vdr_test_node, zmq.DEALER) yield sock sock.close(linger=0) sock = None - test_node.stop() + vdr_test_node.stop() -def test_client_send_wrong_ledger_status(client, looper, test_node): - looper.add(test_node) +def test_client_send_wrong_ledger_status(client, looper, vdr_test_node): + looper.add(vdr_test_node) wrong_msg = "{ \"op\": \"LEDGER_STATUS\", \"txnSeqNo\": 0, \"merkleRoot\": null, \"ledgerId\": 0, \"ppSeqNo\": null, \"viewNo\": null, \"protocolVersion\": 2}" client.send_string(wrong_msg) # ugly hack... needs to run several steps for looper diff --git a/plenum/test/client/test_protocol_version.py b/plenum/test/client/test_protocol_version.py index 0e21e042bc..964926753f 100644 --- a/plenum/test/client/test_protocol_version.py +++ b/plenum/test/client/test_protocol_version.py @@ -1,15 +1,15 @@ import pytest - +import json +from indy_vdr.error import VdrError from plenum.common.messages.node_messages import LedgerStatus from plenum.common.types import f - from plenum.server.node import Node - +from plenum.common.request import Request from plenum.common.constants import CURRENT_PROTOCOL_VERSION from plenum.common.exceptions import RequestNackedException, CommonSdkIOException -from plenum.test.helper import sdk_send_signed_requests, \ - sdk_get_and_check_replies, sdk_random_request_objects, \ - sdk_sign_request_objects, sdk_get_bad_response, sdk_send_random_and_check +from plenum.test.helper import vdr_send_signed_requests, \ + vdr_get_and_check_replies, vdr_random_request_objects, \ + vdr_sign_request_objects, vdr_get_bad_response, vdr_send_random_and_check, vdr_signed_random_requests error_msg = 'Make sure that the latest LibIndy is used ' \ 'and `set_protocol_version({})` is called' \ @@ -54,14 +54,14 @@ def test_client_send_incorrect_ledger_status(looper, txnPoolNodeSet): def test_client_send_correct_ledger_status(looper, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, txnPoolNodeSet): # Client send LEDGER_STATUS with protocoloVersion field. 
# Node send her LEDGER_STATUS back - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, 1) # node sent LEDGER_STATUS spy = txnPoolNodeSet[0].spylog @@ -70,69 +70,63 @@ def test_client_send_correct_ledger_status(looper, send.params['remoteName'] != 'client_1' and isinstance(send.params['msg'], LedgerStatus)]) == 1 - +#Proposed Test need confirmation def test_request_none_protocol_version(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, request_num): - _, did = sdk_wallet_client - req_objs = sdk_random_request_objects(request_num, identifier=did, - protocol_version=None) - for req_obj in req_objs: - assert req_obj.protocolVersion == None - - signed_reqs = sdk_sign_request_objects(looper, sdk_wallet_client, req_objs) - reqs = sdk_send_signed_requests(sdk_pool_handle, signed_reqs) - sdk_get_bad_response(looper, reqs, RequestNackedException, - 'missed fields - protocolVersion. ' + error_msg) + _, did = vdr_wallet_client + req_obj = Request(identifier=did, protocolVersion=None) + assert req_obj.protocolVersion == None + + with pytest.raises(VdrError) as e: + vdr_signed_random_requests(looper, vdr_wallet_client, request_num, protocol_version=None) + #sdk_get_bad_response(looper, reqs, RequestNackedException, + # 'missed fields - protocolVersion. ' + error_msg) def test_request_with_outdated_version(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, request_num): - _, did = sdk_wallet_client - reqs_obj = sdk_random_request_objects(request_num, identifier=did, - protocol_version=CURRENT_PROTOCOL_VERSION - 1) - for req_obj in reqs_obj: - assert req_obj.protocolVersion == CURRENT_PROTOCOL_VERSION - 1 + _, did = vdr_wallet_client + protocol = int(CURRENT_PROTOCOL_VERSION) - 1 + req_obj = Request(identifier=did, protocolVersion=protocol) + assert req_obj.protocolVersion == protocol - signed_reqs = sdk_sign_request_objects(looper, sdk_wallet_client, reqs_obj) - reqs = sdk_send_signed_requests(sdk_pool_handle, signed_reqs) - sdk_get_bad_response(looper, reqs, RequestNackedException, - 'differs from current protocol version. ' - .format(CURRENT_PROTOCOL_VERSION) + error_msg) + with pytest.raises(VdrError) as e: + vdr_signed_random_requests(looper, vdr_wallet_client, request_num, protocol_version=CURRENT_PROTOCOL_VERSION - 1) def test_request_with_invalid_version(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, request_num): - _, did = sdk_wallet_client - reqs_obj = sdk_random_request_objects(request_num, identifier=did, - protocol_version=-1) - for req_obj in reqs_obj: - assert req_obj.protocolVersion == -1 + _, did = vdr_wallet_client + req_obj = Request(identifier=did, protocolVersion=-1) + assert req_obj.protocolVersion == -1 - signed_reqs = sdk_sign_request_objects(looper, sdk_wallet_client, reqs_obj) - reqs = sdk_send_signed_requests(sdk_pool_handle, signed_reqs) - sdk_get_bad_response(looper, reqs, CommonSdkIOException, - 'Got an error with code 113') + signed_objects = vdr_signed_random_requests(looper, vdr_wallet_client, request_num) + reqs = vdr_send_signed_requests(vdr_pool_handle, signed_objects, looper) + vdr_get_bad_response(looper, reqs, RequestNackedException, + 'missed fields - protocolVersion. 
' + error_msg) + + def test_request_with_correct_version(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, request_num): - _, did = sdk_wallet_client - reqs_obj = sdk_random_request_objects(request_num, identifier=did, + _, did = vdr_wallet_client + reqs_obj = vdr_random_request_objects(request_num, identifier=did, protocol_version=CURRENT_PROTOCOL_VERSION) for req_obj in reqs_obj: - assert req_obj.protocolVersion == CURRENT_PROTOCOL_VERSION + assert json.loads(req_obj.body)["protocolVersion"] == CURRENT_PROTOCOL_VERSION - signed_reqs = sdk_sign_request_objects(looper, sdk_wallet_client, reqs_obj) - reqs = sdk_send_signed_requests(sdk_pool_handle, signed_reqs) - sdk_get_and_check_replies(looper, reqs) + signed_reqs = vdr_sign_request_objects(looper, vdr_wallet_client, reqs_obj) + reqs = vdr_send_signed_requests(vdr_pool_handle, signed_reqs, looper) + vdr_get_and_check_replies(looper, reqs) diff --git a/plenum/test/client/test_send_from_wrong_socket.py b/plenum/test/client/test_send_from_wrong_socket.py index 0f1b0aedcf..078ea835df 100644 --- a/plenum/test/client/test_send_from_wrong_socket.py +++ b/plenum/test/client/test_send_from_wrong_socket.py @@ -9,17 +9,17 @@ @pytest.fixture(params=[zmq.REQ]) -def zmq_connection(test_node, request, looper): - sock = create_zmq_connection(test_node, request.param) +def zmq_connection(vdr_test_node, request, looper): + sock = create_zmq_connection(vdr_test_node, request.param) yield sock sock.close(linger=0) sock = None - test_node.stop() - looper.removeProdable(test_node) + vdr_test_node.stop() + looper.removeProdable(vdr_test_node) -def test_send_using_not_dealer_socket(zmq_connection, test_node, looper, sdk_wallet_client, logsearch): +def test_send_using_not_dealer_socket(zmq_connection, vdr_test_node, looper, vdr_wallet_client, logsearch): default_log_level = logging.root.level Logger.setLogLevel(logging.DEBUG) logs, _ = logsearch(files=['zstack.py'], msgs=['Got too many values for unpack']) @@ -27,7 +27,7 @@ def test_send_using_not_dealer_socket(zmq_connection, test_node, looper, sdk_wal def check_reply(): assert logs - looper.add(test_node) + looper.add(vdr_test_node) msg = "{ \"op\": \"LEDGER_STATUS\", \"txnSeqNo\": 0, \"merkleRoot\": \"\", \"ledgerId\": 0, \"ppSeqNo\": null, \"viewNo\": null, \"protocolVersion\": 2}" zmq_connection.send_string(msg) looper.run(eventually(check_reply)) diff --git a/plenum/test/common/test_digest_validation.py b/plenum/test/common/test_digest_validation.py index 0df35c724d..1fdc84a440 100644 --- a/plenum/test/common/test_digest_validation.py +++ b/plenum/test/common/test_digest_validation.py @@ -2,7 +2,8 @@ import types import pytest -from indy.did import create_and_store_my_did + +from plenum.test.wallet_helper import vdr_create_and_store_did from plenum.server.consensus.ordering_service import OrderingService @@ -13,9 +14,9 @@ from plenum.test.test_node import getPrimaryReplica from plenum.common.exceptions import RequestNackedException from plenum.common.request import Request -from plenum.test.helper import sdk_gen_request, sdk_multisign_request_object, sdk_send_signed_requests, \ - sdk_get_and_check_replies, sdk_random_request_objects, waitForViewChange, max_3pc_batch_limits, \ - sdk_send_random_and_check +from plenum.test.helper import vdr_gen_request, vdr_multisign_request_object, vdr_send_signed_requests, \ + vdr_get_and_check_replies, vdr_random_request_objects, waitForViewChange, max_3pc_batch_limits, \ + vdr_send_random_and_check from 
plenum.common.constants import CURRENT_PROTOCOL_VERSION, DOMAIN_LEDGER_ID, TXN_TYPE @@ -24,11 +25,11 @@ @pytest.fixture(scope='function') -def op(looper, sdk_wallet_stewards): - wh, did = sdk_wallet_stewards[0] +def op(looper, vdr_wallet_stewards): + wh, did = vdr_wallet_stewards[0] seed = randomString(32) new_did, new_verkey = looper.loop.run_until_complete( - create_and_store_my_did(wh, json.dumps({'seed': seed}))) + vdr_create_and_store_did(wh, seed)) op = {'type': '1', 'dest': new_did, 'verkey': new_verkey, @@ -51,15 +52,15 @@ def wait_one_batch(node, before): @pytest.fixture(scope='function') -def two_requests(looper, op, sdk_wallet_stewards): - wh, did = sdk_wallet_stewards[0] +def two_requests(looper, op, vdr_wallet_stewards): + wh, did = vdr_wallet_stewards[0] - req = json.dumps(sdk_gen_request(op, protocol_version=CURRENT_PROTOCOL_VERSION, + req = json.dumps(vdr_gen_request(op, protocol_version=CURRENT_PROTOCOL_VERSION, identifier=did).as_dict) - req1 = sdk_multisign_request_object(looper, sdk_wallet_stewards[0], req) + req1 = vdr_multisign_request_object(looper, vdr_wallet_stewards[0], req) req_obj1 = Request(**json.loads(req1)) - req2 = sdk_multisign_request_object(looper, sdk_wallet_stewards[1], req1) + req2 = vdr_multisign_request_object(looper, vdr_wallet_stewards[1], req1) req_obj2 = Request(**json.loads(req2)) assert req_obj1.payload_digest == req_obj2.payload_digest @@ -68,39 +69,40 @@ def two_requests(looper, op, sdk_wallet_stewards): def test_second_digest_is_written( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_stewards): - req = json.dumps(sdk_random_request_objects(1, CURRENT_PROTOCOL_VERSION, sdk_wallet_stewards[0][1])[0].as_dict) - req = sdk_multisign_request_object(looper, sdk_wallet_stewards[0], req) - req = sdk_multisign_request_object(looper, sdk_wallet_stewards[1], req) - sdk_get_and_check_replies(looper, sdk_send_signed_requests(sdk_pool_handle, [req])) + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_stewards): + reqs = vdr_random_request_objects(1, CURRENT_PROTOCOL_VERSION, vdr_wallet_stewards[0][1]) + #req = json.dumps(reqs.as_dict) + req = vdr_multisign_request_object(looper, vdr_wallet_stewards[0], reqs[0]) + req = vdr_multisign_request_object(looper, vdr_wallet_stewards[1], reqs[0]) + res = vdr_get_and_check_replies(looper, vdr_send_signed_requests(vdr_pool_handle, [req], looper)) - req = Request(**json.loads(req)) + req_metadata = json.loads(res[0][1]["Alpha"])["result"]["txn"]["metadata"] - ledger_id, _ = txnPoolNodeSet[0].seqNoDB.get_by_payload_digest(req.payload_digest) + ledger_id, _ = txnPoolNodeSet[0].seqNoDB.get_by_payload_digest(req_metadata["payloadDigest"]) assert ledger_id == DOMAIN_LEDGER_ID - payload_digest = txnPoolNodeSet[0].seqNoDB.get_by_full_digest(req.digest) - assert payload_digest == req.payload_digest + payload_digest = txnPoolNodeSet[0].seqNoDB.get_by_full_digest(req_metadata["digest"]) + assert payload_digest == req_metadata["payloadDigest"] def test_send_same_txn_with_different_signatures_in_separate_batches( - looper, txnPoolNodeSet, sdk_pool_handle, two_requests): + looper, txnPoolNodeSet, vdr_pool_handle, two_requests): # Send two txn with same payload digest but different signatures, # so that they could be processed in one batch, trying to break the ledger hashes req1, req2 = two_requests - rep1 = sdk_send_signed_requests(sdk_pool_handle, [req1]) - sdk_get_and_check_replies(looper, rep1) + rep1 = vdr_send_signed_requests(vdr_pool_handle, [req1], looper) + vdr_get_and_check_replies(looper, rep1) - rep2 = 
sdk_send_signed_requests(sdk_pool_handle, [req2]) + rep2 = vdr_send_signed_requests(vdr_pool_handle, [req2], looper) with pytest.raises(RequestNackedException) as e: - sdk_get_and_check_replies(looper, rep2) + vdr_get_and_check_replies(looper, rep2) e.match('Same txn was already ordered with different signatures or pluggable fields') def test_send_same_txn_with_different_signatures_in_one_batch( - looper, txnPoolNodeSet, sdk_pool_handle, two_requests, tconf): + looper, txnPoolNodeSet, vdr_pool_handle, two_requests, tconf): req1, req2 = two_requests lo_before = (txnPoolNodeSet[0].replicas[0].last_ordered_3pc[1], @@ -108,8 +110,8 @@ def test_send_same_txn_with_different_signatures_in_one_batch( old_reqs = len(txnPoolNodeSet[0].requests) with max_3pc_batch_limits(tconf, size=2): - sdk_send_signed_requests(sdk_pool_handle, [req1]) - sdk_send_signed_requests(sdk_pool_handle, [req2]) + vdr_send_signed_requests(vdr_pool_handle, [req1], looper) + vdr_send_signed_requests(vdr_pool_handle, [req2], looper) # We need to check for ordering this way, cause sdk do not allow # track two requests with same reqId at the same time @@ -125,7 +127,7 @@ def test_send_same_txn_with_different_signatures_in_one_batch( def test_parts_of_nodes_have_same_request_with_different_signatures( - looper, txnPoolNodeSet, sdk_pool_handle, two_requests, sdk_wallet_stewards, tconf): + looper, txnPoolNodeSet, vdr_pool_handle, two_requests, vdr_wallet_stewards, tconf): req1s, req2s = two_requests req1 = Request(**json.loads(req1s)) req2 = Request(**json.loads(req2s)) @@ -158,19 +160,19 @@ def test_parts_of_nodes_have_same_request_with_different_signatures( assert node.spylog.count(node.request_propagates) >= 1 node.spylog.getAll(node.request_propagates) - req1s = sdk_send_signed_requests(sdk_pool_handle, [req1s]) - sdk_get_and_check_replies(looper, req1s) + req1s = vdr_send_signed_requests(vdr_pool_handle, [req1s], looper) + vdr_get_and_check_replies(looper, req1s) - req2s = sdk_send_signed_requests(sdk_pool_handle, [req2s]) + req2s = vdr_send_signed_requests(vdr_pool_handle, [req2s], looper) with pytest.raises(RequestNackedException) as e: - sdk_get_and_check_replies(looper, req2s) + vdr_get_and_check_replies(looper, req2s) e.match('Same txn was already ordered with different signatures or pluggable fields') ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) def test_suspicious_primary_send_same_request_with_different_signatures( - looper, txnPoolNodeSet, sdk_pool_handle, two_requests): + looper, txnPoolNodeSet, vdr_pool_handle, two_requests): assert txnPoolNodeSet[0].master_replica.isPrimary txnPoolNodeSet[0].master_replica._ordering_service._do_dynamic_validation = \ types.MethodType(malicious_dynamic_validation, @@ -179,8 +181,8 @@ def test_suspicious_primary_send_same_request_with_different_signatures( req1, req2 = two_requests old_view = txnPoolNodeSet[0].viewNo - sdk_send_signed_requests(sdk_pool_handle, [req1]) - sdk_send_signed_requests(sdk_pool_handle, [req2]) + vdr_send_signed_requests(vdr_pool_handle, [req1], looper) + vdr_send_signed_requests(vdr_pool_handle, [req2], looper) waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=old_view + 1) all(cll.params['msg'][1] == Suspicions.PPR_WITH_ORDERED_REQUEST.code for cll in @@ -191,8 +193,8 @@ def test_suspicious_primary_send_same_request_with_different_signatures( def test_suspicious_primary_send_same_request_with_same_signatures( - looper, txnPoolNodeSet, sdk_pool_handle, two_requests, sdk_wallet_stewards, tconf): - couple = sdk_send_random_and_check(looper, 
txnPoolNodeSet, sdk_pool_handle, sdk_wallet_stewards[0], 1)[0] + looper, txnPoolNodeSet, vdr_pool_handle, two_requests, vdr_wallet_stewards, tconf): + couple = vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_stewards[0], 1)[0] req = Request(**couple[0]) replica = getPrimaryReplica(txnPoolNodeSet) replica._ordering_service._do_dynamic_validation = types.MethodType(malicious_dynamic_validation, replica.node) diff --git a/plenum/test/conftest.py b/plenum/test/conftest.py index c21d1959b2..8c40efebf2 100644 --- a/plenum/test/conftest.py +++ b/plenum/test/conftest.py @@ -7,14 +7,22 @@ import re import warnings import json +import asyncio from contextlib import ExitStack from functools import partial import time from typing import Dict, Any, List + +from plenum.test.wallet_helper import vdr_create_and_store_did, vdr_wallet_helper, vdr_pool_helper +from indy_vdr import set_protocol_version + from indy.pool import create_pool_ledger_config, open_pool_ledger, close_pool_ledger -from indy.wallet import create_wallet, open_wallet, close_wallet -from indy.did import create_and_store_my_did +from indy.wallet import create_wallet as sdk_create_wallet +from indy.wallet import open_wallet as sdk_open_wallet +from indy.wallet import close_wallet as sdk_close_wallet +from indy.did import create_and_store_my_did as sdk_create_and_store_my_did + from ledger.genesis_txn.genesis_txn_file_util import create_genesis_txn_init_ledger from plenum.bls.bls_crypto_factory import create_default_bls_crypto_factory @@ -33,7 +41,8 @@ from plenum.test.greek import genNodeNames from plenum.test.grouped_load_scheduling import GroupedLoadScheduling from plenum.test.node_catchup.helper import waitNodeDataEquality, check_last_3pc_master -from plenum.test.pool_transactions.helper import sdk_add_new_nym, sdk_pool_refresh, sdk_add_new_steward_and_node +from plenum.test.pool_transactions.helper import vdr_add_new_nym, vdr_pool_refresh, vdr_add_new_steward_and_node, \ + sdk_pool_refresh, sdk_add_new_nym, sdk_add_new_steward_and_node from plenum.test.simulation.sim_random import DefaultSimRandom from plenum.test.spy_helpers import getAllReturnVals from plenum.test.view_change.helper import ensure_view_change @@ -57,8 +66,9 @@ from plenum.test.helper import checkLastClientReqForNode, \ waitForViewChange, requestReturnedToNode, randomText, \ mockDistributions, mockImportModule, chk_all_funcs, \ - create_new_test_node, sdk_json_to_request_object, sdk_send_random_requests, \ - sdk_get_and_check_replies, sdk_set_protocol_version, sdk_send_random_and_check, MockTimer, create_pool_txn_data + create_new_test_node, vdr_json_to_request_object, vdr_send_random_requests, \ + vdr_get_and_check_replies, vdr_set_protocol_version, vdr_send_random_and_check, MockTimer, \ + create_pool_txn_data, sdk_set_protocol_version, sdk_send_random_and_check from plenum.test.node_request.node_request_helper import checkPrePrepared, \ checkPropagated, checkPrepared, checkCommitted from plenum.test.plugin.helper import getPluginPath @@ -77,6 +87,7 @@ ROCKSDB_WRITE_BUFFER_SIZE = 256 * 1024 + def get_data_for_role(pool_txn_data, role): name_and_seeds = [] for txn in pool_txn_data['txns']: @@ -91,6 +102,11 @@ def get_data_for_role(pool_txn_data, role): def pytest_xdist_make_scheduler(config, log): return GroupedLoadScheduling(config, log) +@pytest.fixture(scope="module") +def event_loop(): + loop = asyncio.new_event_loop() + yield loop + loop.close() @pytest.fixture(scope="session") def warnfilters(): @@ -442,10 +458,10 @@ def 
delayed_perf_chk(txnPoolNodeSet): @pytest.fixture(scope="module") -def sent1(looper, sdk_pool_handle, - sdk_wallet_client): - request_couple_json = sdk_send_random_requests( - looper, sdk_pool_handle, sdk_wallet_client, 1) +def sent1(looper, vdr_pool_handle, + vdr_wallet_client): + request_couple_json = vdr_send_random_requests( + looper, vdr_pool_handle, vdr_wallet_client, 1) return request_couple_json @@ -453,7 +469,7 @@ def sent1(looper, sdk_pool_handle, def reqAcked1(looper, txnPoolNodeSet, sent1, faultyNodes): numerOfNodes = len(txnPoolNodeSet) - request = sdk_json_to_request_object(sent1[0][0]) + request = vdr_json_to_request_object(sent1[0][0]) # Wait until request received by all nodes propTimeout = waits.expectedClientToPoolRequestDeliveryTime(numerOfNodes) @@ -524,13 +540,13 @@ def committed1(looper, txnPoolNodeSet, prepared1, faultyNodes): @pytest.fixture(scope="module") -def replied1(looper, txnPoolNodeSet, sdk_wallet_client, +def replied1(looper, txnPoolNodeSet, vdr_wallet_client, committed1, faultyNodes, sent1): numOfNodes = len(txnPoolNodeSet) numOfInstances = getNoInstances(numOfNodes) quorum = numOfInstances * (numOfNodes - faultyNodes) - _, did = sdk_wallet_client + _, did = vdr_wallet_client def checkOrderedCount(): resp = [requestReturnedToNode(node, @@ -544,7 +560,7 @@ def checkOrderedCount(): retryWait=1, timeout=orderingTimeout)) - sdk_get_and_check_replies(looper, sent1) + vdr_get_and_check_replies(looper, sent1) return committed1 @@ -873,7 +889,7 @@ def sdk_wallet_data(): shutil.rmtree(w_dir, ignore_errors=True) -async def _gen_pool_handler(work_dir, name, open_config): +async def sdk_gen_pool_handler(work_dir, name, open_config): txn_file_name = os.path.join(work_dir, "pool_transactions_genesis") pool_config = json.dumps({"genesis_txn": str(txn_file_name)}) await create_pool_ledger_config(name, pool_config) @@ -889,7 +905,7 @@ def sdk_pool_handle(looper, txnPoolNodeSet, tdirWithPoolTxns, sdk_pool_data): sdk_set_protocol_version(looper) pool_name, open_config = sdk_pool_data pool_handle = looper.loop.run_until_complete( - _gen_pool_handler(tdirWithPoolTxns, pool_name, open_config)) + sdk_gen_pool_handler(tdirWithPoolTxns, pool_name, open_config)) yield pool_handle try: looper.loop.run_until_complete(close_pool_ledger(pool_handle)) @@ -897,18 +913,18 @@ def sdk_pool_handle(looper, txnPoolNodeSet, tdirWithPoolTxns, sdk_pool_data): logger.debug("Unhandled exception: {}".format(e)) -async def _gen_wallet_handler(wallet_data): +async def sdk_gen_wallet_handler(wallet_data): wallet_config, wallet_credentials = wallet_data - await create_wallet(wallet_config, wallet_credentials) - wallet_handle = await open_wallet(wallet_config, wallet_credentials) + await sdk_create_wallet(wallet_config, wallet_credentials) + wallet_handle = await sdk_open_wallet(wallet_config, wallet_credentials) return wallet_handle @pytest.fixture(scope='module') def sdk_wallet_handle(looper, sdk_wallet_data): - wallet_handle = looper.loop.run_until_complete(_gen_wallet_handler(sdk_wallet_data)) + wallet_handle = looper.loop.run_until_complete(sdk_gen_wallet_handler(sdk_wallet_data)) yield wallet_handle - looper.loop.run_until_complete(close_wallet(wallet_handle)) + looper.loop.run_until_complete(sdk_close_wallet(wallet_handle)) @pytest.fixture(scope='module') @@ -944,7 +960,7 @@ def sdk_new_client_seed(): @pytest.fixture(scope='module') def sdk_wallet_trustee(looper, sdk_wallet_handle, sdk_trustee_seed): (trustee_did, trustee_verkey) = looper.loop.run_until_complete( - 
create_and_store_my_did(sdk_wallet_handle, + sdk_create_and_store_my_did(sdk_wallet_handle, json.dumps({'seed': sdk_trustee_seed}))) return sdk_wallet_handle, trustee_did @@ -952,7 +968,7 @@ def sdk_wallet_trustee(looper, sdk_wallet_handle, sdk_trustee_seed): @pytest.fixture(scope='module') def sdk_wallet_steward(looper, sdk_wallet_handle, sdk_steward_seed): (steward_did, steward_verkey) = looper.loop.run_until_complete( - create_and_store_my_did(sdk_wallet_handle, + sdk_create_and_store_my_did(sdk_wallet_handle, json.dumps({'seed': sdk_steward_seed}))) return sdk_wallet_handle, steward_did @@ -972,7 +988,7 @@ def sdk_wallet_stewards(looper, sdk_wallet_handle, poolTxnStewardNames, poolTxnD for name in poolTxnStewardNames: seed = poolTxnData["seeds"][name] (steward_did, steward_verkey) = looper.loop.run_until_complete( - create_and_store_my_did(sdk_wallet_handle, + sdk_create_and_store_my_did(sdk_wallet_handle, json.dumps({'seed': seed}))) stewards.append((sdk_wallet_handle, steward_did)) @@ -982,7 +998,7 @@ def sdk_wallet_stewards(looper, sdk_wallet_handle, poolTxnStewardNames, poolTxnD @pytest.fixture(scope='module') def sdk_wallet_client(looper, sdk_wallet_handle, sdk_client_seed): (client_did, _) = looper.loop.run_until_complete( - create_and_store_my_did(sdk_wallet_handle, + sdk_create_and_store_my_did(sdk_wallet_handle, json.dumps({'seed': sdk_client_seed}))) return sdk_wallet_handle, client_did @@ -990,7 +1006,7 @@ def sdk_wallet_client(looper, sdk_wallet_handle, sdk_client_seed): @pytest.fixture(scope='module') def sdk_wallet_client2(looper, sdk_wallet_handle, sdk_client_seed2): (client_did, _) = looper.loop.run_until_complete( - create_and_store_my_did(sdk_wallet_handle, + sdk_create_and_store_my_did(sdk_wallet_handle, json.dumps({'seed': sdk_client_seed2}))) return sdk_wallet_handle, client_did @@ -1004,8 +1020,273 @@ def sdk_wallet_new_client(looper, sdk_pool_handle, sdk_wallet_steward, return wh, client_did +# @pytest.fixture(scope="module") +# def create_node_and_not_start(testNodeClass, +# node_config_helper_class, +# tconf, +# tdir, +# allPluginsPath, +# looper, +# tdirWithPoolTxns, +# tdirWithDomainTxns, +# tdirWithNodeKeepInited): +# with ExitStack() as exitStack: +# node = exitStack.enter_context(create_new_test_node(testNodeClass, +# node_config_helper_class, +# "Alpha", +# tconf, +# tdir, +# allPluginsPath)) +# node.write_manager.on_catchup_finished() +# yield node +# node.stop() + + +# @pytest.fixture(scope='function') +# def view_change_done(looper, txnPoolNodeSet): +# ensure_view_change(looper, txnPoolNodeSet) +# ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet) + + +# @pytest.fixture(scope='function', +# params=['primary', 'non-primary']) +# def one_replica_and_others_in_backup_instance( +# request, txnPoolNodeSet, view_change_done): + +# # NOTICE: This parametrized fixture triggers view change as pre-condition + +# backup_inst_id = 1 + +# primary = getPrimaryReplica(txnPoolNodeSet, backup_inst_id) +# non_primaries = getNonPrimaryReplicas(txnPoolNodeSet, backup_inst_id) + +# if request.param == 'primary': +# return primary, non_primaries +# else: +# return non_primaries[0], [primary] + non_primaries[1:] + + +# @pytest.fixture(scope='function') +# def test_node(tdirWithPoolTxns, +# tdirWithDomainTxns, +# poolTxnNodeNames, +# tdirWithNodeKeepInited, +# tdir, +# tconf, +# allPluginsPath): +# node_name = poolTxnNodeNames[0] +# config_helper = PNodeConfigHelper(node_name, tconf, chroot=tdir) +# node = TestNode( +# node_name, +# config_helper=config_helper, +# 
config=tconf, +# pluginPaths=allPluginsPath) +# yield node +# node.onStopping() # TODO stop won't call onStopping as we are in Stopped state + + +@pytest.fixture(scope="module") +def sdk_node_created_after_some_txns(looper, testNodeClass, do_post_node_creation, + sdk_pool_handle, sdk_wallet_client, sdk_wallet_steward, + txnPoolNodeSet, tdir, tconf, allPluginsPath, request): + txnCount = getValueFromModule(request, "txnCount", 5) + sdk_send_random_and_check(looper, txnPoolNodeSet, + sdk_pool_handle, + sdk_wallet_client, + txnCount) + new_steward_name = randomString() + new_node_name = "Epsilon" + new_steward_wallet_handle, new_node = sdk_add_new_steward_and_node( + looper, sdk_pool_handle, sdk_wallet_steward, + new_steward_name, new_node_name, tdir, tconf, nodeClass=testNodeClass, + allPluginsPath=allPluginsPath, autoStart=True, + do_post_node_creation=do_post_node_creation) + sdk_pool_refresh(looper, sdk_pool_handle) + yield looper, new_node, sdk_pool_handle, new_steward_wallet_handle + + +@pytest.fixture(scope="module") +def sdk_node_set_with_node_added_after_some_txns( + txnPoolNodeSet, sdk_node_created_after_some_txns): + looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \ + sdk_node_created_after_some_txns + txnPoolNodeSet.append(new_node) + looper.run(checkNodesConnected(txnPoolNodeSet)) + sdk_pool_refresh(looper, sdk_pool_handle) + return looper, new_node, sdk_pool_handle, new_steward_wallet_handle + + +@pytest.fixture(scope="module") +def sdk_new_node_caught_up(txnPoolNodeSet, + sdk_node_set_with_node_added_after_some_txns): + looper, new_node, _, _ = sdk_node_set_with_node_added_after_some_txns + waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4], + exclude_from_check=['check_last_ordered_3pc_backup']) + check_last_3pc_master(new_node, txnPoolNodeSet[:4]) + + # Check if catchup done once + catchup_done_once = True + for leecher in new_node.ledgerManager._node_leecher._leechers.values(): + catchup_done_once = catchup_done_once and (leecher.num_txns_caught_up > 0) + + if not catchup_done_once: + # It might be the case that node has to do catchup again, in that case + # check the return value of `num_txns_caught_up_in_last_catchup` to be + # greater than 0 + + assert max( + getAllReturnVals( + new_node, + new_node.num_txns_caught_up_in_last_catchup)) > 0 + + return new_node + +# ####### VDR fixtures ######### + + +@pytest.fixture(scope='module') +def vdr_pool_data(txnPoolNodeSet): + p_name = "pool_name_" + randomText(13) + cfg = {"timeout": 20, "extended_timeout": 60, "conn_limit": 100000, "conn_active_timeout": 1000, + "preordered_nodes": [n.name for n in txnPoolNodeSet]} + yield p_name, json.dumps(cfg) + p_dir = os.path.join(os.path.expanduser("~/.indy_client/pool"), p_name) + if os.path.isdir(p_dir): + shutil.rmtree(p_dir, ignore_errors=True) + + +@pytest.fixture(scope='module') +def vdr_wallet_data(): + w_name = "wallet_name_" + randomText(13) + vdr_wallet_credentials = '{"key": "key"}' + vdr_wallet_config = json.dumps({"id": w_name}) + yield vdr_wallet_config, vdr_wallet_credentials + w_dir = os.path.join(os.path.expanduser("~/.indy_client/wallet"), w_name) + if os.path.isdir(w_dir): + shutil.rmtree(w_dir, ignore_errors=True) + + +async def vdr_gen_pool_handler(work_dir, name, open_config): + txn_file_name = os.path.join(work_dir, "pool_transactions_genesis") + pool_handle, _ = await vdr_pool_helper(txn_file_name) + return pool_handle + + +@pytest.fixture(scope='module') +def vdr_pool_handle(looper, txnPoolNodeSet, tdirWithPoolTxns, vdr_pool_data): + 
pool_name, open_config = vdr_pool_data + pool_handle = looper.loop.run_until_complete( + vdr_gen_pool_handler(tdirWithPoolTxns, pool_name, open_config)) + yield pool_handle + + +@pytest.fixture(scope="session", autouse=True) +def set_proto_ver(): + """ + Sets protocol version to 2 + """ + yield set_protocol_version(2) + + @pytest.fixture(scope="module") -def create_node_and_not_start(testNodeClass, +async def vdr_wallet_handle(set_proto_ver): + """ + Creates a wallet handle and yields it + """ + wallet_handle, _, _ = await vdr_wallet_helper() + yield wallet_handle + + +@pytest.fixture(scope='module') +def vdr_trustee_seed(trustee_data): + _, seed = trustee_data[0] + return seed + + +@pytest.fixture(scope='module') +def vdr_steward_seed(poolTxnStewardData): + _, seed = poolTxnStewardData + return seed.decode() + + +@pytest.fixture(scope='module') +def vdr_client_seed(poolTxnClientData): + _, seed = poolTxnClientData + return seed.decode() + + +@pytest.fixture(scope='module') +def vdr_client_seed2(poolTxnClientNames, poolTxnData): + name = poolTxnClientNames[1] + seed = poolTxnData["seeds"][name] + return seed + + +@pytest.fixture(scope='module') +def vdr_new_client_seed(): + return "Client10000000000000000000000000" + + +@pytest.fixture(scope='module') +def vdr_wallet_trustee(looper, vdr_wallet_handle, vdr_trustee_seed): + (trustee_did, trustee_verkey) = looper.loop.run_until_complete( + vdr_create_and_store_did(vdr_wallet_handle, vdr_trustee_seed)) + return vdr_wallet_handle, trustee_did + + +@pytest.fixture(scope='module') +def vdr_wallet_steward(looper, vdr_wallet_handle, vdr_steward_seed): + (steward_did, steward_verkey) = looper.loop.run_until_complete( + vdr_create_and_store_did(vdr_wallet_handle, vdr_steward_seed)) + return vdr_wallet_handle, steward_did + + +@pytest.fixture(scope='module') +def vdr_wallet_new_steward(looper, sdk_pool_handle, vdr_wallet_steward): + wh, client_did = vdr_add_new_nym(looper, sdk_pool_handle, + vdr_wallet_steward, + alias='new_steward_qwerty', + role='STEWARD') + return wh, client_did + +@pytest.fixture(scope='module') +def vdr_wallet_stewards(looper, vdr_wallet_handle, poolTxnStewardNames, poolTxnData): + stewards = [] + for name in poolTxnStewardNames: + seed = poolTxnData["seeds"][name] + (steward_did, steward_verkey) = looper.loop.run_until_complete( + vdr_create_and_store_did(vdr_wallet_handle, seed)) + stewards.append((vdr_wallet_handle, steward_did)) + + yield stewards + + +@pytest.fixture(scope='module') +def vdr_wallet_client(looper, vdr_wallet_handle, vdr_client_seed): + (client_did, _) = looper.loop.run_until_complete( + vdr_create_and_store_did(vdr_wallet_handle, vdr_client_seed)) + return vdr_wallet_handle, client_did + + +@pytest.fixture(scope='module') +def vdr_wallet_client2(looper, vdr_wallet_handle, vdr_client_seed2): + (client_did, _) = looper.loop.run_until_complete( + vdr_create_and_store_did(vdr_wallet_handle, vdr_client_seed2)) + return vdr_wallet_handle, client_did + + +@pytest.fixture(scope='module') +def vdr_wallet_new_client(looper, sdk_pool_handle, vdr_wallet_steward, + vdr_new_client_seed): + wh, client_did = vdr_add_new_nym(looper, sdk_pool_handle, + vdr_wallet_steward, + seed=vdr_new_client_seed) + return wh, client_did + +@pytest.fixture(scope="module") +def vdr_create_node_and_not_start(testNodeClass, node_config_helper_class, tconf, tdir, @@ -1027,15 +1308,15 @@ def create_node_and_not_start, @pytest.fixture(scope='function') -def 
view_change_done(looper, txnPoolNodeSet): +def vdr_view_change_done(looper, txnPoolNodeSet): ensure_view_change(looper, txnPoolNodeSet) ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet) @pytest.fixture(scope='function', params=['primary', 'non-primary']) -def one_replica_and_others_in_backup_instance( - request, txnPoolNodeSet, view_change_done): +def vdr_one_replica_and_others_in_backup_instance( + request, txnPoolNodeSet, vdr_view_change_done): # NOTICE: This parametrized fixture triggers view change as pre-condition @@ -1051,7 +1332,7 @@ def one_replica_and_others_in_backup_instance( @pytest.fixture(scope='function') -def test_node(tdirWithPoolTxns, +def vdr_test_node(tdirWithPoolTxns, tdirWithDomainTxns, poolTxnNodeNames, tdirWithNodeKeepInited, @@ -1070,40 +1351,39 @@ def test_node(tdirWithPoolTxns, @pytest.fixture(scope="module") -def sdk_node_created_after_some_txns(looper, testNodeClass, do_post_node_creation, - sdk_pool_handle, sdk_wallet_client, sdk_wallet_steward, +def vdr_node_created_after_some_txns(looper, testNodeClass, do_post_node_creation, + vdr_pool_handle, vdr_wallet_client, vdr_wallet_steward, txnPoolNodeSet, tdir, tconf, allPluginsPath, request): txnCount = getValueFromModule(request, "txnCount", 5) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, vdr_pool_handle, txnCount) new_steward_name = randomString() new_node_name = "Epsilon" - new_steward_wallet_handle, new_node = sdk_add_new_steward_and_node( - looper, sdk_pool_handle, sdk_wallet_steward, + new_steward_wallet_handle, pool_handle = vdr_add_new_steward_and_node( + looper, vdr_pool_handle, vdr_wallet_steward, new_steward_name, new_node_name, tdir, tconf, nodeClass=testNodeClass, allPluginsPath=allPluginsPath, autoStart=True, do_post_node_creation=do_post_node_creation) - sdk_pool_refresh(looper, sdk_pool_handle) - yield looper, new_node, sdk_pool_handle, new_steward_wallet_handle + vdr_pool_refresh(looper, vdr_pool_handle) + yield looper, pool_handle, vdr_pool_handle, new_steward_wallet_handle @pytest.fixture(scope="module") -def sdk_node_set_with_node_added_after_some_txns( - txnPoolNodeSet, sdk_node_created_after_some_txns): +def vdr_node_set_with_node_added_after_some_txns( + txnPoolNodeSet, vdr_node_created_after_some_txns): looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \ - sdk_node_created_after_some_txns + vdr_node_created_after_some_txns txnPoolNodeSet.append(new_node) looper.run(checkNodesConnected(txnPoolNodeSet)) - sdk_pool_refresh(looper, sdk_pool_handle) + vdr_pool_refresh(looper, sdk_pool_handle) return looper, new_node, sdk_pool_handle, new_steward_wallet_handle @pytest.fixture(scope="module") -def sdk_new_node_caught_up(txnPoolNodeSet, - sdk_node_set_with_node_added_after_some_txns): - looper, new_node, _, _ = sdk_node_set_with_node_added_after_some_txns +def vdr_new_node_caught_up(txnPoolNodeSet, + vdr_node_set_with_node_added_after_some_txns): + looper, new_node, _, _ = vdr_node_set_with_node_added_after_some_txns waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4], exclude_from_check=['check_last_ordered_3pc_backup']) check_last_3pc_master(new_node, txnPoolNodeSet[:4]) @@ -1126,6 +1406,7 @@ def sdk_new_node_caught_up(txnPoolNodeSet, return new_node + @pytest.fixture(params=range(100)) def random(request): return DefaultSimRandom(request.param) diff --git a/plenum/test/consensus/order_service/conftest.py 
b/plenum/test/consensus/order_service/conftest.py index 876ee6d6b9..75d538d4a2 100644 --- a/plenum/test/consensus/order_service/conftest.py +++ b/plenum/test/consensus/order_service/conftest.py @@ -10,7 +10,7 @@ from plenum.server.consensus.primary_selector import RoundRobinConstantNodesPrimariesSelector from plenum.server.replica_freshness_checker import FreshnessChecker from plenum.test.consensus.order_service.helper import _register_pp_ts -from plenum.test.helper import sdk_random_request_objects, create_pre_prepare_params +from plenum.test.helper import vdr_random_request_objects, create_pre_prepare_params from plenum.test.bls.conftest import fake_state_root_hash, fake_multi_sig, fake_multi_sig_value from plenum.test.testing_utils import FakeSomething @@ -86,7 +86,7 @@ def pre_prepare(orderer, _pre_prepare): @pytest.fixture() def fake_requests(): - return sdk_random_request_objects(10, identifier="fake_did", + return vdr_random_request_objects(10, identifier="fake_did", protocol_version=CURRENT_PROTOCOL_VERSION) diff --git a/plenum/test/consensus/order_service/sim_helper.py b/plenum/test/consensus/order_service/sim_helper.py index 278bdfef64..a26fb80df0 100644 --- a/plenum/test/consensus/order_service/sim_helper.py +++ b/plenum/test/consensus/order_service/sim_helper.py @@ -3,7 +3,7 @@ from plenum.common.startable import Mode from plenum.server.consensus.utils import replica_name_to_node_name from plenum.test.consensus.helper import SimPool -from plenum.test.helper import sdk_random_request_objects +from plenum.test.helper import vdr_random_request_objects from stp_core.common.log import getlogger @@ -13,7 +13,7 @@ def create_requests(count): - return sdk_random_request_objects(count, CURRENT_PROTOCOL_VERSION) + return vdr_random_request_objects(count, CURRENT_PROTOCOL_VERSION) def create_pool(random): diff --git a/plenum/test/consensus/order_service/test_ordering_process_preprepare.py b/plenum/test/consensus/order_service/test_ordering_process_preprepare.py index f570ba5182..f789558c96 100644 --- a/plenum/test/consensus/order_service/test_ordering_process_preprepare.py +++ b/plenum/test/consensus/order_service/test_ordering_process_preprepare.py @@ -9,7 +9,7 @@ from plenum.server.replica_helper import PP_SUB_SEQ_NO_WRONG, PP_NOT_FINAL from plenum.server.suspicion_codes import Suspicions from plenum.test.consensus.order_service.helper import _register_pp_ts, check_suspicious -from plenum.test.helper import sdk_random_request_objects, create_pre_prepare_params +from plenum.test.helper import vdr_random_request_objects, create_pre_prepare_params from plenum.test.testing_utils import FakeSomething @@ -21,7 +21,7 @@ def pre_prepare(orderer, _pre_prepare): @pytest.fixture() def fake_requests(): - return sdk_random_request_objects(10, identifier="fake_did", + return vdr_random_request_objects(10, identifier="fake_did", protocol_version=CURRENT_PROTOCOL_VERSION) diff --git a/plenum/test/delayers.py b/plenum/test/delayers.py index c03c0c20b7..e035987201 100644 --- a/plenum/test/delayers.py +++ b/plenum/test/delayers.py @@ -90,8 +90,9 @@ def ppgDelay(delay: float = DEFAULT_DELAY, sender_filter: str = None): def ppDelay(delay: float = DEFAULT_DELAY, instId: int = None, sender_filter: str = None): # Delayer of PRE-PREPARE requests from a particular instance - return delayerMsgTuple(delay, PrePrepare, instFilter=instId, + res = delayerMsgTuple(delay, PrePrepare, instFilter=instId, senderFilter=sender_filter) + return res def pDelay(delay: float = DEFAULT_DELAY, instId: int = None, sender_filter: 
str = None): @@ -162,7 +163,8 @@ def old_view_pp_reply_delay(delay: float = DEFAULT_DELAY, ledger_filter=None): def req_delay(delay: float = DEFAULT_DELAY): # Delayer of Request requests - return delayerMsgTuple(delay, Request) + res = delayerMsgTuple(delay, Request) + return res def msg_req_delay(delay: float = DEFAULT_DELAY, types_to_delay: List = None): diff --git a/plenum/test/demotion_promotion/test_demote_from_10_to_4_nodes.py b/plenum/test/demotion_promotion/test_demote_from_10_to_4_nodes.py index 01704a225d..9569fc33a9 100644 --- a/plenum/test/demotion_promotion/test_demote_from_10_to_4_nodes.py +++ b/plenum/test/demotion_promotion/test_demote_from_10_to_4_nodes.py @@ -2,9 +2,9 @@ import pytest -from plenum.test.helper import checkViewNoForNodes, waitForViewChange, sdk_send_random_and_check +from plenum.test.helper import checkViewNoForNodes, waitForViewChange, vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.pool_transactions.helper import demote_node, promote_node from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change.helper import restart_node @@ -27,8 +27,8 @@ def tconf(tconf): def test_demote_promote_restart_after_promotion_from_10_to_4_nodes(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath): @@ -41,7 +41,7 @@ def demote_another_one(rest_pool): starting_view_no = checkViewNoForNodes(rest_pool) - demote_node(looper, sdk_wallet_steward, sdk_pool_handle, demoted_node) + demote_node(looper, vdr_wallet_steward, vdr_pool_handle, demoted_node) waitForViewChange(looper, rest_pool, expectedViewNo=starting_view_no + 1) ensureElectionsDone(looper, rest_pool, customTimeout=60) @@ -52,10 +52,10 @@ def demote_another_one(rest_pool): etalon_node = txnPoolNodeSet[-1] while len(rest_nodes) > 4: rest_nodes = demote_another_one(rest_nodes) - sdk_send_random_and_check(looper, rest_nodes, sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, rest_nodes, vdr_pool_handle, vdr_wallet_steward, 5) starting_view_no = checkViewNoForNodes(rest_nodes) - promote_node(looper, sdk_wallet_steward, sdk_pool_handle, etalon_node) + promote_node(looper, vdr_wallet_steward, vdr_pool_handle, etalon_node) waitForViewChange(looper, rest_nodes, expectedViewNo=starting_view_no + 1) ensure_all_nodes_have_same_data(looper, rest_nodes) @@ -64,4 +64,4 @@ def demote_another_one(rest_pool): restart_node(looper, rest_nodes, etalon_node, tconf, tdir, allPluginsPath) ensureElectionsDone(looper, rest_nodes) - sdk_ensure_pool_functional(looper, rest_nodes, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, rest_nodes, vdr_wallet_steward, vdr_pool_handle) diff --git a/plenum/test/demotion_promotion/test_demote_promote.py b/plenum/test/demotion_promotion/test_demote_promote.py index 78bed97267..d1cbb7c8b3 100644 --- a/plenum/test/demotion_promotion/test_demote_promote.py +++ b/plenum/test/demotion_promotion/test_demote_promote.py @@ -1,8 +1,8 @@ import pytest -from plenum.test.helper import checkViewNoForNodes, waitForViewChange, sdk_send_random_and_check +from plenum.test.helper import checkViewNoForNodes, waitForViewChange, vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper 
import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.pool_transactions.helper import demote_node, promote_node from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change.helper import restart_node @@ -21,8 +21,8 @@ def tconf(tconf): def test_demote_promote_restart_after_promotion(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath): @@ -31,16 +31,16 @@ def test_demote_promote_restart_after_promotion(txnPoolNodeSet, starting_view_no = checkViewNoForNodes(txnPoolNodeSet) - demote_node(looper, sdk_wallet_steward, sdk_pool_handle, demoted_node) + demote_node(looper, vdr_wallet_steward, vdr_pool_handle, demoted_node) waitForViewChange(looper, rest_nodes, expectedViewNo=starting_view_no + 1) ensureElectionsDone(looper, rest_nodes) ensure_all_nodes_have_same_data(looper, rest_nodes) - sdk_send_random_and_check(looper, rest_nodes, sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, rest_nodes, vdr_pool_handle, vdr_wallet_steward, 5) starting_view_no = checkViewNoForNodes(rest_nodes) - promote_node(looper, sdk_wallet_steward, sdk_pool_handle, demoted_node) + promote_node(looper, vdr_wallet_steward, vdr_pool_handle, demoted_node) waitForViewChange(looper, rest_nodes, expectedViewNo=starting_view_no + 1) ensureElectionsDone(looper, rest_nodes) @@ -49,4 +49,4 @@ def test_demote_promote_restart_after_promotion(txnPoolNodeSet, restart_node(looper, txnPoolNodeSet, demoted_node, tconf, tdir, allPluginsPath) ensureElectionsDone(looper, txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) diff --git a/plenum/test/demotion_promotion/test_demote_promote_7_nodes.py b/plenum/test/demotion_promotion/test_demote_promote_7_nodes.py index b38f297601..6f34ddc91b 100644 --- a/plenum/test/demotion_promotion/test_demote_promote_7_nodes.py +++ b/plenum/test/demotion_promotion/test_demote_promote_7_nodes.py @@ -1,8 +1,8 @@ import pytest -from plenum.test.helper import checkViewNoForNodes, waitForViewChange, sdk_send_random_and_check +from plenum.test.helper import checkViewNoForNodes, waitForViewChange, vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.pool_transactions.helper import demote_node, promote_node from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change.helper import restart_node @@ -21,8 +21,8 @@ def tconf(tconf): def test_demote_promote_restart_after_promotion_7_nodes(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath): @@ -31,16 +31,16 @@ def test_demote_promote_restart_after_promotion_7_nodes(txnPoolNodeSet, starting_view_no = checkViewNoForNodes(txnPoolNodeSet) - demote_node(looper, sdk_wallet_steward, sdk_pool_handle, demoted_node) + demote_node(looper, vdr_wallet_steward, vdr_pool_handle, demoted_node) waitForViewChange(looper, rest_nodes, expectedViewNo=starting_view_no + 1) ensureElectionsDone(looper, rest_nodes) ensure_all_nodes_have_same_data(looper, rest_nodes) - sdk_send_random_and_check(looper, rest_nodes, sdk_pool_handle, sdk_wallet_steward, 
5) + vdr_send_random_and_check(looper, rest_nodes, vdr_pool_handle, vdr_wallet_steward, 5) starting_view_no = checkViewNoForNodes(rest_nodes) - promote_node(looper, sdk_wallet_steward, sdk_pool_handle, demoted_node) + promote_node(looper, vdr_wallet_steward, vdr_pool_handle, demoted_node) waitForViewChange(looper, rest_nodes, expectedViewNo=starting_view_no + 1) ensureElectionsDone(looper, rest_nodes, instances_list=[0, 1, 2]) @@ -49,4 +49,4 @@ def test_demote_promote_restart_after_promotion_7_nodes(txnPoolNodeSet, restart_node(looper, txnPoolNodeSet, demoted_node, tconf, tdir, allPluginsPath) ensureElectionsDone(looper, txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) diff --git a/plenum/test/forced_request/test_forced_request_validation.py b/plenum/test/forced_request/test_forced_request_validation.py index 8634dc47af..d4691442c5 100644 --- a/plenum/test/forced_request/test_forced_request_validation.py +++ b/plenum/test/forced_request/test_forced_request_validation.py @@ -8,14 +8,14 @@ from plenum.common.util import randomString from plenum.common.constants import FORCE -from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request, prepare_nym_request, \ +from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request, sdk_prepare_nym_request, \ sdk_add_new_nym def test_forced_request_validation(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, sdk_wallet_steward): nym_request, new_did = looper.loop.run_until_complete( - prepare_nym_request(sdk_wallet_client, randomString(32), + sdk_prepare_nym_request(sdk_wallet_client, randomString(32), None, None)) request_json = json.loads(nym_request) diff --git a/plenum/test/freeze_ledgers/helper.py b/plenum/test/freeze_ledgers/helper.py index 3bd22ddf0b..ec2523b32b 100644 --- a/plenum/test/freeze_ledgers/helper.py +++ b/plenum/test/freeze_ledgers/helper.py @@ -1,19 +1,21 @@ import json +from typing import List -from indy.ledger import build_ledgers_freeze_request, build_get_frozen_ledgers_request -from plenum.test.helper import sdk_get_and_check_replies, \ - sdk_send_signed_requests, sdk_sign_and_submit_req_obj, sdk_multi_sign_request_objects, sdk_json_to_request_object +from indy_vdr import ledger +from plenum.test.helper import vdr_get_and_check_replies, \ + vdr_send_signed_requests, vdr_sign_and_submit_req_obj, vdr_multi_sign_request_objects, vdr_json_to_request_object -def sdk_send_freeze_ledgers(looper, sdk_pool_handle, sdk_wallets, ledgers_ids: [int]): - req = looper.loop.run_until_complete(build_ledgers_freeze_request(sdk_wallets[0][1], ledgers_ids)) - signed_reqs = sdk_multi_sign_request_objects(looper, sdk_wallets, - [sdk_json_to_request_object(json.loads(req))]) - reps = sdk_send_signed_requests(sdk_pool_handle, signed_reqs) - return sdk_get_and_check_replies(looper, reps)[0] +def sdk_send_freeze_ledgers(looper, sdk_pool_handle, sdk_wallets, ledgers_ids: List[int]): + req = looper.loop.run_until_complete(ledger.build_ledgers_freeze_request(sdk_wallets[0][1], ledgers_ids)) + signed_reqs = vdr_multi_sign_request_objects(looper, sdk_wallets, + [vdr_json_to_request_object(json.loads(req))]) + reps = vdr_send_signed_requests(sdk_pool_handle, signed_reqs, looper) + return vdr_get_and_check_replies(looper, reps)[0] +# sdk_wallet needs to be DID with new function since wallet no longer tuple def sdk_get_frozen_ledgers(looper, sdk_pool_handle, 
sdk_wallet): - req = looper.loop.run_until_complete(build_get_frozen_ledgers_request(sdk_wallet[1])) - rep = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle, sdk_wallet, sdk_json_to_request_object(json.loads(req))) - return sdk_get_and_check_replies(looper, [rep])[0] + req = looper.loop.run_until_complete(ledger.build_get_frozen_ledgers_request(sdk_wallet[1])) + rep = vdr_sign_and_submit_req_obj(looper, sdk_pool_handle, sdk_wallet, vdr_json_to_request_object(json.loads(req))) + return vdr_get_and_check_replies(looper, [rep])[0] diff --git a/plenum/test/freshness/test_freshness_after_catchup.py b/plenum/test/freshness/test_freshness_after_catchup.py index 3753fac840..0394a2c0ae 100644 --- a/plenum/test/freshness/test_freshness_after_catchup.py +++ b/plenum/test/freshness/test_freshness_after_catchup.py @@ -5,7 +5,7 @@ from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected -from plenum.test.helper import assertExp, sdk_send_random_and_check, freshness +from plenum.test.helper import assertExp, vdr_send_random_and_check, freshness from plenum.test.test_node import checkNodesConnected from plenum.test.view_change.helper import start_stopped_node from stp_core.loop.eventually import eventually @@ -21,9 +21,9 @@ def tconf(tconf): def test_freshness_after_catchup(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_steward, tconf, tdir, allPluginsPath): @@ -66,6 +66,6 @@ def test_freshness_after_catchup(looper, waitNodeDataEquality(looper, *txnPoolNodeSet) assert all(n.viewNo == view_no for n in txnPoolNodeSet) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) waitNodeDataEquality(looper, *txnPoolNodeSet) diff --git a/plenum/test/freshness/test_freshness_batch_updates_last_ordered.py b/plenum/test/freshness/test_freshness_batch_updates_last_ordered.py index bd36ddab58..df1e7f8e26 100644 --- a/plenum/test/freshness/test_freshness_batch_updates_last_ordered.py +++ b/plenum/test/freshness/test_freshness_batch_updates_last_ordered.py @@ -5,7 +5,7 @@ from plenum.common.constants import DOMAIN_LEDGER_ID from plenum.test.freshness.helper import get_all_multi_sig_values_for_all_nodes, \ check_updated_bls_multi_sig_for_all_ledgers, check_freshness_updated_for_all -from plenum.test.helper import freshness, sdk_send_random_and_check, primary_disconnection_time +from plenum.test.helper import freshness, vdr_send_random_and_check, primary_disconnection_time from plenum.test.spy_helpers import getSpecificDiscardedMsg from plenum.test.view_change.helper import restart_node @@ -21,8 +21,8 @@ def tconf(tconf): yield tconf -def test_freshness_batch_updates_last_ordered(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, tconf, tdir, allPluginsPath): +def test_freshness_batch_updates_last_ordered(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, tconf, tdir, allPluginsPath): assert txnPoolNodeSet[0].master_replica.isPrimary looper.run(eventually( @@ -49,7 +49,7 @@ def test_freshness_batch_updates_last_ordered(looper, txnPoolNodeSet, sdk_pool_h old_discard = len(getSpecificDiscardedMsg(txnPoolNodeSet[1], PrePrepare)) # correct ordering - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, 
txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, 1) # domain ledger equeal assert txnPoolNodeSet[0].db_manager.get_txn_root_hash(DOMAIN_LEDGER_ID) == \ @@ -59,8 +59,8 @@ def test_freshness_batch_updates_last_ordered(looper, txnPoolNodeSet, sdk_pool_h assert len(getSpecificDiscardedMsg(txnPoolNodeSet[1], PrePrepare)) == old_discard -def test_freshness_batch_updates_last_ordered_non_primary(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, tconf, tdir, allPluginsPath): +def test_freshness_batch_updates_last_ordered_non_primary(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, tconf, tdir, allPluginsPath): looper.run(eventually( check_freshness_updated_for_all, txnPoolNodeSet, timeout=FRESHNESS_TIMEOUT * 2) @@ -80,7 +80,7 @@ def test_freshness_batch_updates_last_ordered_non_primary(looper, txnPoolNodeSet txnPoolNodeSet[1].master_replica.last_ordered_3pc # correct ordering - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, 1) # domain ledger equeal assert txnPoolNodeSet[0].db_manager.get_txn_root_hash(DOMAIN_LEDGER_ID) == \ diff --git a/plenum/test/freshness/test_freshness_during_domain_ordering.py b/plenum/test/freshness/test_freshness_during_domain_ordering.py index fcd0042995..4fa5792ef3 100644 --- a/plenum/test/freshness/test_freshness_during_domain_ordering.py +++ b/plenum/test/freshness/test_freshness_during_domain_ordering.py @@ -2,7 +2,7 @@ from plenum.common.constants import DOMAIN_LEDGER_ID, POOL_LEDGER_ID from plenum.test.freshness.helper import check_update_bls_multi_sig_during_ordering -from plenum.test.helper import freshness, sdk_send_random_and_check +from plenum.test.helper import freshness, vdr_send_random_and_check FRESHNESS_TIMEOUT = 10 @@ -14,13 +14,13 @@ def tconf(tconf): def test_update_bls_multi_sig_during_domain_ordering(looper, tconf, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_stewards): + vdr_pool_handle, + vdr_wallet_stewards): def send_txn(): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_stewards[3], + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_stewards[3], 1) check_update_bls_multi_sig_during_ordering(looper, txnPoolNodeSet, diff --git a/plenum/test/freshness/test_freshness_during_pool_ordering.py b/plenum/test/freshness/test_freshness_during_pool_ordering.py index d236c0eeb2..efd1e59d8d 100644 --- a/plenum/test/freshness/test_freshness_during_pool_ordering.py +++ b/plenum/test/freshness/test_freshness_during_pool_ordering.py @@ -15,13 +15,13 @@ def tconf(tconf): def test_update_bls_multi_sig_during_pool_ordering(looper, tconf, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_stewards): + vdr_pool_handle, + vdr_wallet_stewards): def send_txn(): sdk_change_bls_key(looper, txnPoolNodeSet, txnPoolNodeSet[3], - sdk_pool_handle, - sdk_wallet_stewards[3], + vdr_pool_handle, + vdr_wallet_stewards[3], check_functional=False) check_update_bls_multi_sig_during_ordering(looper, txnPoolNodeSet, diff --git a/plenum/test/freshness/test_freshness_in_catchup.py b/plenum/test/freshness/test_freshness_in_catchup.py index c840b615ea..b277701e97 100644 --- a/plenum/test/freshness/test_freshness_in_catchup.py +++ b/plenum/test/freshness/test_freshness_in_catchup.py @@ -1,7 +1,7 @@ import pytest from plenum.test.delayers import cr_delay, cDelay -from plenum.test.helper import sdk_send_random_and_check, freshness +from plenum.test.helper import 
vdr_send_random_and_check, freshness from plenum.test.stasher import delay_rules STATE_FRESHNESS_UPDATE_INTERVAL = 5 @@ -19,8 +19,8 @@ def tconf(tconf): def test_freshness_in_catchup(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tconf, ): """ Check that InstanceChange with reason "State signatures are not updated for too long" @@ -31,7 +31,7 @@ def test_freshness_in_catchup(looper, lagging_node = txnPoolNodeSet[-1] with delay_rules(lagging_node.nodeIbStasher, cr_delay(), cDelay()): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, 1) lagging_node.start_catchup() looper.runFor(tconf.ACCEPTABLE_FRESHNESS_INTERVALS_COUNT * tconf.STATE_FRESHNESS_UPDATE_INTERVAL + 5) diff --git a/plenum/test/freshness/test_freshness_instance_changes_are_sent_continuosly.py b/plenum/test/freshness/test_freshness_instance_changes_are_sent_continuosly.py index eb1b57f304..0d9fb9546b 100644 --- a/plenum/test/freshness/test_freshness_instance_changes_are_sent_continuosly.py +++ b/plenum/test/freshness/test_freshness_instance_changes_are_sent_continuosly.py @@ -16,7 +16,7 @@ def tconf(tconf): def test_freshness_instance_changes_are_sent_continuosly(looper, tconf, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): current_view_no = txnPoolNodeSet[0].viewNo for node in txnPoolNodeSet: assert node.viewNo == current_view_no diff --git a/plenum/test/freshness/test_freshness_view_change.py b/plenum/test/freshness/test_freshness_view_change.py index 605d2ec117..b8c7b39d32 100644 --- a/plenum/test/freshness/test_freshness_view_change.py +++ b/plenum/test/freshness/test_freshness_view_change.py @@ -5,7 +5,7 @@ from plenum.common.startable import Mode from plenum.server.consensus.monitoring.freshness_monitor_service import FreshnessMonitorService from plenum.test.helper import freshness -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional FRESHNESS_TIMEOUT = 5 @@ -48,7 +48,7 @@ def test_new_node_view_changer_state_is_fresh_enough(tconf, freshness_monitor_se def test_view_change_doesnt_happen_if_pool_is_left_alone(looper, tconf, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): current_view_no = txnPoolNodeSet[0].viewNo for node in txnPoolNodeSet: assert node.viewNo == current_view_no @@ -58,7 +58,7 @@ def test_view_change_doesnt_happen_if_pool_is_left_alone(looper, tconf, txnPoolN for node in txnPoolNodeSet: assert node.viewNo == current_view_no - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) def test_view_changer_state_is_not_fresh_in_view_change(tconf, diff --git a/plenum/test/freshness/test_replica_freshness.py b/plenum/test/freshness/test_replica_freshness.py index 9e92e3f68a..2e3bff2a8d 100644 --- a/plenum/test/freshness/test_replica_freshness.py +++ b/plenum/test/freshness/test_replica_freshness.py @@ -40,7 +40,7 @@ def inst_id(request): @pytest.fixture(scope='function') def replica_with_valid_requests(primary_replica): - requests = {ledger_id: sdk_random_request_objects(1, identifier="did", + requests = {ledger_id: vdr_random_request_objects(1, identifier="did", protocol_version=CURRENT_PROTOCOL_VERSION)[0] for ledger_id in 
LEDGER_IDS} diff --git a/plenum/test/freshness/test_view_change_happens_if_ordering_is_halted.py b/plenum/test/freshness/test_view_change_happens_if_ordering_is_halted.py index 1a79a80e33..c526336a4e 100644 --- a/plenum/test/freshness/test_view_change_happens_if_ordering_is_halted.py +++ b/plenum/test/freshness/test_view_change_happens_if_ordering_is_halted.py @@ -3,7 +3,7 @@ from plenum.test.delayers import ppDelay from plenum.test.freshness.helper import has_freshness_instance_change from plenum.test.helper import freshness -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.stasher import delay_rules from stp_core.loop.eventually import eventually @@ -17,7 +17,7 @@ def tconf(tconf): def test_view_change_happens_if_ordering_is_halted(looper, tconf, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): current_view_no = txnPoolNodeSet[0].viewNo for node in txnPoolNodeSet: assert node.viewNo == current_view_no @@ -32,4 +32,4 @@ def check_next_view(): assert sum(1 for node in txnPoolNodeSet if has_freshness_instance_change(node)) >= 3 - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/freshness/test_view_change_happens_if_primary_is_slow_to_update_freshness.py b/plenum/test/freshness/test_view_change_happens_if_primary_is_slow_to_update_freshness.py index bdaea72e96..1ca64348c3 100644 --- a/plenum/test/freshness/test_view_change_happens_if_primary_is_slow_to_update_freshness.py +++ b/plenum/test/freshness/test_view_change_happens_if_primary_is_slow_to_update_freshness.py @@ -2,7 +2,7 @@ from plenum.test.freshness.helper import has_freshness_instance_change from plenum.test.helper import freshness -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from stp_core.loop.eventually import eventually FRESHNESS_TIMEOUT = 5 @@ -15,7 +15,7 @@ def tconf(tconf): def test_view_change_happens_if_primary_is_slow_to_update_freshness(looper, tconf, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle, + vdr_wallet_client, vdr_pool_handle, monkeypatch): monkeypatch.setattr(txnPoolNodeSet[0].master_replica._freshness_checker, 'freshness_timeout', 3 * FRESHNESS_TIMEOUT) @@ -32,4 +32,4 @@ def check_next_view(): assert sum(1 for node in txnPoolNodeSet if has_freshness_instance_change(node)) >= 3 - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/helper.py b/plenum/test/helper.py index a82146559d..dbd99b95e3 100644 --- a/plenum/test/helper.py +++ b/plenum/test/helper.py @@ -14,7 +14,6 @@ import base58 import pytest -from indy.pool import set_protocol_version from common.serializers.serialization import invalid_index_serializer from crypto.bls.bls_factory import BlsFactoryCrypto @@ -29,8 +28,18 @@ import json import asyncio -from indy.ledger import sign_and_submit_request, sign_request, submit_request, build_node_request, \ - multi_sign_request +from indy_vdr import ledger +from indy_vdr import VdrError, VdrErrorCode +from plenum.test.wallet_helper import vdr_sign_and_submit_request, vdr_sign_request, vdr_multi_sign_request +from indy_vdr import set_protocol_version + +from 
indy.pool import set_protocol_version as set_sdk_protocol_version + +from indy.ledger import sign_and_submit_request as sign_and_submit_sdk_request +from indy.ledger import sign_request as sign_sdk_request +from indy.ledger import submit_request as submit_sdk_request +from indy.ledger import build_node_request as build_sdk_node_request +from indy.ledger import multi_sign_request as multi_sign_sdk_request from indy.error import ErrorCode, IndyError from ledger.genesis_txn.genesis_txn_file_util import genesis_txn_file @@ -83,19 +92,19 @@ def send_reqs_batches_and_get_suff_replies( **kwargs): # This method assumes that `num_reqs` <= num_batches*MaxbatchSize if num_batches == 1: - return sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + return vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, num_reqs) else: requests = [] for _ in range(num_batches - 1): requests.extend( - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, num_reqs // num_batches)) rem = num_reqs % num_batches if rem == 0: rem = num_reqs // num_batches requests.extend( - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, rem)) return requests @@ -753,6 +762,7 @@ def check_request_ordered(node, request: Request): def wait_for_requests_ordered(looper, nodes, requests): node_count = len(nodes) timeout_per_request = waits.expectedTransactionExecutionTime(node_count) + timeout_per_request = timeout_per_request * 4 total_timeout = (1 + len(requests) / 10) * timeout_per_request coros = [partial(check_request_ordered, node, @@ -773,7 +783,6 @@ def create_new_test_node(test_node_class, node_config_helper_class, name, conf, cliha=client_ha, bootstrap_cls=bootstrap_cls) - # ####### SDK @@ -804,7 +813,7 @@ def sdk_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did): } req = looper.loop.run_until_complete( - build_node_request(new_steward_did, node_did, json.dumps(data))) + build_sdk_node_request(new_steward_did, node_did, json.dumps(data))) return Request(**json.loads(req)) @@ -819,7 +828,7 @@ def sdk_random_request_objects(count, protocol_version, identifier=None, def sdk_sign_request_objects(looper, sdk_wallet, reqs: Sequence): wallet_h, did = sdk_wallet reqs_str = [json.dumps(req.as_dict) for req in reqs] - reqs = [looper.loop.run_until_complete(sign_request(wallet_h, did, req)) + reqs = [looper.loop.run_until_complete(sign_sdk_request(wallet_h, did, req)) for req in reqs_str] return reqs @@ -828,7 +837,7 @@ def sdk_multi_sign_request_objects(looper, sdk_wallets, reqs: Sequence): reqs_str = [json.dumps(req.as_dict) for req in reqs] for sdk_wallet in sdk_wallets: wallet_h, did = sdk_wallet - reqs_str = [looper.loop.run_until_complete(multi_sign_request(wallet_h, did, req)) + reqs_str = [looper.loop.run_until_complete(multi_sign_sdk_request(wallet_h, did, req)) for req in reqs_str] return reqs_str @@ -836,14 +845,14 @@ def sdk_multi_sign_request_objects(looper, sdk_wallets, reqs: Sequence): def sdk_sign_request_strings(looper, sdk_wallet, reqs: Sequence): wallet_h, did = sdk_wallet reqs_str = [json.dumps(req) for req in reqs] - reqs = [looper.loop.run_until_complete(sign_request(wallet_h, did, req)) + reqs = [looper.loop.run_until_complete(sign_sdk_request(wallet_h, did, req)) for req in reqs_str] return reqs def sdk_multisign_request_object(looper, sdk_wallet, 
req): wh, did = sdk_wallet - return looper.loop.run_until_complete(multi_sign_request(wh, did, req)) + return looper.loop.run_until_complete(multi_sign_sdk_request(wh, did, req)) def sdk_multisign_request_from_dict(looper, sdk_wallet, op, reqId=None, taa_acceptance=None, endorser=None): @@ -854,7 +863,7 @@ def sdk_multisign_request_from_dict(looper, sdk_wallet, op, reqId=None, taa_acce taaAcceptance=taa_acceptance, endorser=endorser) req_str = json.dumps(request.as_dict) - resp = looper.loop.run_until_complete(multi_sign_request(wh, did, req_str)) + resp = looper.loop.run_until_complete(multi_sign_sdk_request(wh, did, req_str)) return json.loads(resp) @@ -865,15 +874,15 @@ def sdk_signed_random_requests(looper, sdk_wallet, count): return sdk_sign_request_objects(looper, sdk_wallet, reqs_obj) -def sdk_send_signed_requests(pool_h, signed_reqs: Sequence): +def sdk_send_signed_requests(looper, pool_h, signed_reqs: Sequence): return [(json.loads(req), - asyncio.ensure_future(submit_request(pool_h, req))) + asyncio.ensure_future(submit_sdk_request(pool_h, req), loop=looper.loop)) for req in signed_reqs] def sdk_send_random_requests(looper, pool_h, sdk_wallet, count: int): reqs = sdk_signed_random_requests(looper, sdk_wallet, count) - return sdk_send_signed_requests(pool_h, reqs) + return sdk_send_signed_requests(looper, pool_h, reqs) def sdk_send_random_request(looper, pool_h, sdk_wallet): @@ -909,7 +918,7 @@ def sdk_send_random_pool_and_domain_requests(looper, pool_h, sdk_wallet_new_stew def sdk_sign_and_submit_req(pool_handle, sdk_wallet, req): wallet_handle, sender_did = sdk_wallet return json.loads(req), asyncio.ensure_future( - sign_and_submit_request(pool_handle, wallet_handle, sender_did, req)) + sign_and_submit_sdk_request(pool_handle, wallet_handle, sender_did, req)) def sdk_sign_and_submit_req_obj(looper, pool_handle, sdk_wallet, req_obj): @@ -1002,7 +1011,8 @@ def _parse_op(res_dict): def sdk_get_and_check_replies(looper, sdk_req_resp: Sequence, timeout=None): rets = [] - for req_res in sdk_get_replies(looper, sdk_req_resp, timeout): + reqs_res = sdk_get_replies(looper, sdk_req_resp, timeout) + for req_res in reqs_res: sdk_check_reply(req_res) rets.append(req_res) return rets @@ -1098,7 +1108,7 @@ def sdk_sign_request_from_dict(looper, sdk_wallet, op, reqId=None, taa_acceptanc taaAcceptance=taa_acceptance, endorser=endorser) req_str = json.dumps(request.as_dict) - resp = looper.loop.run_until_complete(sign_request(wallet_h, did, req_str)) + resp = looper.loop.run_until_complete(sign_sdk_request(wallet_h, did, req_str)) return json.loads(resp) @@ -1140,6 +1150,423 @@ def sdk_get_bad_response(looper, reqs, exception, message): def sdk_set_protocol_version(looper, version=CURRENT_PROTOCOL_VERSION): + looper.loop.run_until_complete(set_sdk_protocol_version(version)) + + +# ####### VDR + + +def vdr_gen_request(operation, protocol_version=CURRENT_PROTOCOL_VERSION, + identifier=None, reqId=None, **kwargs): + if reqId is None: + reqId = random.randint(10, 1000000000) + json_req = Request(operation=operation, reqId=reqId, + protocolVersion=protocol_version, identifier=identifier, + **kwargs) + req = ledger.build_custom_request(json_req.as_dict) + return req + +def gen_request_plenum(operation, protocol_version=CURRENT_PROTOCOL_VERSION, + identifier=None, **kwargs): + # Question: Why this method is called sdk_gen_request? 
It does not use + # the indy-sdk + json_req = Request(operation=operation, reqId=random.randint(10, 1000000000), + protocolVersion=protocol_version, identifier=identifier, + **kwargs) + return json_req + +def vdr_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did): + _, new_steward_did = sdk_wallet_new_steward + + node_ip = '{}.{}.{}.{}'.format( + random.randint(1, 240), + random.randint(1, 240), + random.randint(1, 240), + random.randint(1, 240)) + data = { + 'alias': node_alias, + 'client_port': 50001, + 'node_port': 50002, + 'node_ip': node_ip, + 'client_ip': node_ip, + 'services': [] + } + + req = looper.loop.run_until_complete( + ledger.build_node_request(new_steward_did, node_did, json.dumps(data))) + + return Request(**json.loads(req)) + + +def vdr_random_request_objects(count, protocol_version, identifier=None, + **kwargs): + ops = random_requests(count) + reqs = [vdr_gen_request(op, protocol_version=protocol_version, + identifier=identifier, **kwargs) for op in ops] + return reqs + +def vdr_sign_request_objects(looper, sdk_wallet, reqs: Sequence): + wallet_h, did = sdk_wallet + reqs = [looper.loop.run_until_complete(vdr_sign_request(wallet_h, did, req)) + for req in reqs] + return reqs + + +def vdr_multi_sign_request_objects(looper, sdk_wallets, reqs: Sequence): + reqs_str = [json.dumps(req.as_dict) for req in reqs] + for sdk_wallet in sdk_wallets: + wallet_h, did = sdk_wallet + reqs_str = [looper.loop.run_until_complete(vdr_multi_sign_request(wallet_h, did, req)) + for req in reqs_str] + return reqs_str + + +def vdr_sign_request_strings(looper, sdk_wallet, reqs: Sequence): + wallet_h, did = sdk_wallet + reqs_str = [json.dumps(req) for req in reqs] + reqs = [looper.loop.run_until_complete(vdr_sign_request(wallet_h, did, req)) + for req in reqs_str] + return reqs + + +def vdr_multisign_request_object(looper, sdk_wallet, req): + wh, did = sdk_wallet + return looper.loop.run_until_complete(vdr_multi_sign_request(wh, did, req)) + + +def vdr_multisign_request_from_dict(looper, sdk_wallet, op, reqId=None, taa_acceptance=None, endorser=None): + wh, did = sdk_wallet + reqId = reqId or random.randint(10, 100000) + request = Request(operation=op, reqId=reqId, + protocolVersion=CURRENT_PROTOCOL_VERSION, identifier=did, + taaAcceptance=taa_acceptance, + endorser=endorser) + req_str = json.dumps(request.as_dict) + resp = looper.loop.run_until_complete(vdr_multi_sign_request(wh, did, req_str)) + return json.loads(resp) + + +def vdr_signed_random_requests(looper, sdk_wallet, count, protocol_version=CURRENT_PROTOCOL_VERSION): + _, did = sdk_wallet + reqs_obj = vdr_random_request_objects(count, identifier=did, + protocol_version=protocol_version) + return vdr_sign_request_objects(looper, sdk_wallet, reqs_obj) + + +def vdr_send_signed_requests(pool_h, signed_reqs: Sequence, looper): + res = [] + for req in signed_reqs: + req_body = json.loads(req.body) + fut = asyncio.ensure_future(pool_h.submit_action(req), loop=looper.loop) + res.append((req_body, fut)) + return res + + +def vdr_send_random_requests(looper, pool_h, sdk_wallet, count: int): + reqs = vdr_signed_random_requests(looper, sdk_wallet, count) + return vdr_send_signed_requests(pool_h, reqs, looper) + + +def vdr_send_random_request(looper, pool_h, sdk_wallet): + rets = vdr_send_random_requests(looper, pool_h, sdk_wallet, 1) + return rets[0] + + +def vdr_send_random_pool_requests(looper, pool_h, sdk_wallet_new_steward, count: int): + node_alias = random_string(7) + node_did = 
SimpleSigner(seed=random_string(32).encode()).identifier + + reqs = [vdr_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did) for _ in range(count)] + return [vdr_sign_and_submit_req_obj(looper, pool_h, sdk_wallet_new_steward, req) for req in reqs] + + +def vdr_send_random_pool_and_domain_requests(looper, pool_h, sdk_wallet_new_steward, count: int): + node_alias = random_string(7) + node_did = SimpleSigner(seed=random_string(32).encode()).identifier + + req_gens = [ + lambda: vdr_gen_request(random_requests(1)[0], identifier=sdk_wallet_new_steward[1]), + lambda: vdr_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did), + ] + + res = [] + for i in range(count): + req = req_gens[i % len(req_gens)]() + res.append(vdr_sign_and_submit_req_obj(looper, pool_h, sdk_wallet_new_steward, req)) + looper.runFor(0.1) # Give nodes some time to start ordering, so that requests are really alternating + return res + + +def vdr_sign_and_submit_req(looper, pool_handle, sdk_wallet, req): + wallet_handle, sender_did = sdk_wallet + return json.loads(req), asyncio.ensure_future( + vdr_sign_and_submit_request(pool_handle, wallet_handle, sender_did, req), loop=looper.loop) + + +def vdr_sign_and_submit_req_obj(looper, pool_handle, sdk_wallet, req_obj): + s_req = vdr_sign_request_objects(looper, sdk_wallet, [req_obj])[0] + return vdr_send_signed_requests(pool_handle, [s_req], looper)[0] + + +def vdr_sign_and_submit_op(looper, pool_handle, sdk_wallet, op): + _, did = sdk_wallet + req_obj = vdr_gen_request(op, protocol_version=CURRENT_PROTOCOL_VERSION, + identifier=did) + s_req = vdr_sign_request_objects(looper, sdk_wallet, [req_obj])[0] + return vdr_send_signed_requests(pool_handle, [s_req], looper)[0] + + +def vdr_get_reply(looper, sdk_req_resp, timeout=None): + req_json, resp_task = sdk_req_resp + # TODO: change the timeout evaluation logic once the sdk allows tuning the timeout from outside + if timeout is None: + timeout = waits.expectedTransactionExecutionTime(7) + try: + resp = looper.run(asyncio.wait_for(resp_task, timeout=timeout)) + if isinstance(resp, dict): + resp = resp[list(resp.keys())[0]] + resp = json.loads(resp) + except VdrError as e: + resp = e.error_code + except asyncio.TimeoutError: + resp = VdrErrorCode.POOL_TIMEOUT + + return req_json, resp + + +# TODO: Check places where vdr_get_replies is used without vdr_check_reply +# We need to be sure that test behaviour doesn't need to check response +# validity +def vdr_get_replies(looper, sdk_req_resp: Sequence, timeout=None): + resp_tasks = [resp for _, resp in sdk_req_resp] + # TODO: change the timeout evaluation logic once the sdk allows tuning the timeout from outside + if timeout is None: + timeout = waits.expectedTransactionExecutionTime(7) + + def get_res(task, done_list): + if task in done_list: + try: + result = task.result() + if not isinstance(result, dict): + resp = json.loads(result) + else: + resp = result + except VdrError as e: + resp = e.error_code + else: + resp = VdrErrorCode.POOL_TIMEOUT + return resp + timeout = timeout * 4 # Temporary workaround: widen the timeout; it is still unclear whether the slowness comes from the test environment or from the intentional delays in these tests
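+ # Wait for all response futures at once; any future still pending when the timeout expires is cancelled and reported as POOL_TIMEOUT by get_res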
+ done, pending = looper.run(asyncio.wait(resp_tasks, timeout=timeout)) + if pending: + for task in pending: + task.cancel() + + ret = [(req, get_res(resp, done)) for req, resp in sdk_req_resp] + return ret + + +def vdr_check_reply(req_res): + req, res = req_res + if isinstance(res, VdrErrorCode): + if res == VdrErrorCode.POOL_TIMEOUT: + raise PoolLedgerTimeoutException('Got PoolLedgerTimeout for request {}' + .format(req)) + else: + raise CommonSdkIOException('Got an error with code {} for request {}' + .format(res, req)) + if not isinstance(res, dict): + raise CommonSdkIOException("Unexpected response format {}".format(res)) + + def _parse_op(res_dict): + if not isinstance(res_dict, dict): + return + if res_dict['op'] == REQNACK: + raise RequestNackedException('ReqNack of id {}. Reason: {}' + .format(req['reqId'], res_dict.get('reason', 'No reason given'))) + if res_dict['op'] == REJECT: + raise RequestRejectedException('Reject of id {}. Reason: {}' + .format(req['reqId'], res_dict.get('reason', 'No reason given'))) + try: + if 'op' in res: + _parse_op(res) + else: + # Check for errors in nested responses + for resps in res.values(): + if isinstance(resps, str): + try: + parsed = json.loads(resps) + _parse_op(parsed) + except (json.JSONDecodeError, TypeError): + # If it's not valid JSON, just ignore it + pass + elif isinstance(resps, dict): + _parse_op(resps) + + except (AttributeError, TypeError, KeyError) as e: + # If we get unexpected format or structure, log it but don't crash + # This is to ensure tests continue to run even if responses are in unexpected formats + print(f"Warning: Error parsing response: {e}, response: {res}") + # Continue execution - if there's a real error, other tests will likely catch it + + +def vdr_get_and_check_replies(looper, sdk_req_resp: Sequence, timeout=None): + rets = [] + reqs_res = vdr_get_replies(looper, sdk_req_resp, timeout) + for req_res in reqs_res: + vdr_check_reply(req_res) + rets.append(req_res) + return rets + + +def vdr_eval_timeout(req_count: int, node_count: int, + customTimeoutPerReq: float = None, add_delay_to_timeout: float = 0): + timeout_per_request = customTimeoutPerReq or waits.expectedTransactionExecutionTime(node_count) + timeout_per_request += add_delay_to_timeout + # here we try to take into account what timeout for execution + # N request - total_timeout should be in + # timeout_per_request < total_timeout < timeout_per_request * N + # we cannot just take (timeout_per_request * N) because it is so huge. 
+ # (for timeout_per_request=5 and N=10, total_timeout=50sec) + # lets start with some simple formula: + return (1 + req_count / 10) * timeout_per_request + + +def vdr_send_and_check(signed_reqs, looper, txnPoolNodeSet, pool_h, timeout=None): + if not timeout: + timeout = vdr_eval_timeout(len(signed_reqs), len(txnPoolNodeSet)) + results = vdr_send_signed_requests(pool_h, signed_reqs, looper) + sdk_replies = vdr_get_replies(looper, results, timeout=timeout) + for req_res in sdk_replies: + vdr_check_reply(req_res) + return sdk_replies + + +def vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, count, + customTimeoutPerReq: float = None, add_delay_to_timeout: float = 0, + override_timeout_limit=False, total_timeout=None): + sdk_reqs = vdr_send_random_requests(looper, sdk_pool, sdk_wallet, count) + if not total_timeout: + total_timeout = vdr_eval_timeout(len(sdk_reqs), len(txnPoolNodeSet), + customTimeoutPerReq=customTimeoutPerReq, + add_delay_to_timeout=add_delay_to_timeout) + sdk_replies = vdr_get_replies(looper, sdk_reqs, timeout=total_timeout) + for req_res in sdk_replies: + vdr_check_reply(req_res) + return sdk_replies + + +def vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, + num_reqs, num_batches=1, **kwargs): + # This method assumes that `num_reqs` <= num_batches*MaxbatchSize + if num_reqs < num_batches: + raise BaseException( + 'sdk_send_batches_of_random_and_check method assumes that `num_reqs` <= num_batches*MaxbatchSize') + if num_batches == 1: + return vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, num_reqs, **kwargs) + + reqs_in_batch = num_reqs // num_batches + reqs_in_last_batch = reqs_in_batch + num_reqs % num_batches + + sdk_replies = [] + for _ in range(num_batches - 1): + sdk_replies.extend(vdr_send_random_and_check(looper, txnPoolNodeSet, + sdk_pool, sdk_wallet, + reqs_in_batch, **kwargs)) + sdk_replies.extend(vdr_send_random_and_check(looper, txnPoolNodeSet, + sdk_pool, sdk_wallet, + reqs_in_last_batch, **kwargs)) + return sdk_replies + + +def vdr_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, + num_reqs, num_batches=1, timeout=Max3PCBatchWait): + if num_reqs < num_batches: + raise BaseException( + 'sdk_send_batches_of_random_and_check method assumes that `num_reqs` <= num_batches*MaxbatchSize') + if num_batches == 1: + sdk_reqs = vdr_send_random_requests(looper, sdk_pool, sdk_wallet, num_reqs) + looper.runFor(timeout) + return sdk_reqs + + reqs_in_batch = num_reqs // num_batches + reqs_in_last_batch = reqs_in_batch + num_reqs % num_batches + + sdk_reqs = [] + for _ in range(num_batches - 1): + sdk_reqs.extend(vdr_send_random_requests(looper, sdk_pool, sdk_wallet, reqs_in_batch)) + looper.runFor(timeout) + sdk_reqs.extend(vdr_send_random_requests(looper, sdk_pool, sdk_wallet, reqs_in_last_batch)) + looper.runFor(timeout) + return sdk_reqs + + +def vdr_sign_request_from_dict(looper, sdk_wallet, op, reqId=None, taa_acceptance=None, endorser=None): + wallet_h, did = sdk_wallet + reqId = reqId or random.randint(10, 100000) + request = Request(operation=op, reqId=random.randint(10, 1000000000), + protocolVersion=CURRENT_PROTOCOL_VERSION, identifier=did) + req = ledger.build_custom_request(request.as_dict) + resp = looper.loop.run_until_complete(vdr_sign_request(wallet_h, did, req)) + return resp + +def generate_invalid_unsigned_plenum_request(sdk_wallet, op, reqID=None, taa_acceptance=None, endorser=None): + _, did = sdk_wallet + reqId = reqID or random.randint(10, 100000) + 
+def generate_invalid_unsigned_plenum_request(sdk_wallet, op, reqID=None, taa_acceptance=None, endorser=None):
+    _, did = sdk_wallet
+    reqId = reqID or random.randint(10, 100000)
+    request = Request(operation=op, reqId=reqId,
+                      protocolVersion=CURRENT_PROTOCOL_VERSION, identifier=did,
+                      taaAcceptance=taa_acceptance,
+                      endorser=endorser)
+    return request
+
+
+def vdr_check_request_is_not_returned_to_nodes(looper, nodeSet, request):
+    instances = range(getNoInstances(len(nodeSet)))
+    coros = []
+    for node, inst_id in itertools.product(nodeSet, instances):
+        c = partial(checkRequestNotReturnedToNode,
+                    node=node,
+                    identifier=request['identifier'],
+                    reqId=request['reqId'],
+                    instId=inst_id
+                    )
+        coros.append(c)
+    timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
+    looper.run(eventuallyAll(*coros, retryWait=1, totalTimeout=timeout))
+
+
+def vdr_json_to_request_object(json_req):
+    json_req = Request(identifier=json_req.get('identifier', None),
+                       reqId=json_req['reqId'],
+                       operation=json_req['operation'],
+                       signature=json_req['signature'] if 'signature' in json_req else None,
+                       protocolVersion=json_req['protocolVersion'] if 'protocolVersion' in json_req else None,
+                       taaAcceptance=json_req.get('taaAcceptance', None))
+    return ledger.build_custom_request(json_req.as_dict)
+
+def vdr_json_to_plenum_request_object(json_req):
+    json_req = Request(identifier=json_req.get('identifier', None),
+                       reqId=json_req['reqId'],
+                       operation=json_req['operation'],
+                       signature=json_req['signature'] if 'signature' in json_req else None,
+                       protocolVersion=json_req['protocolVersion'] if 'protocolVersion' in json_req else None,
+                       taaAcceptance=json_req.get('taaAcceptance', None))
+    return json_req
+
+def vdr_json_couples_to_request_list(json_couples):
+    req_list = []
+    for json_couple in json_couples:
+        req_list.append(vdr_json_to_plenum_request_object(json_couple[0]))
+    return req_list
+
+
+def vdr_get_bad_response(looper, reqs, exception, message):
+    with pytest.raises(exception) as e:
+        vdr_get_and_check_replies(looper, reqs)
+    assert message in e._excinfo[1].args[0]
+
+
+def vdr_set_protocol_version(looper, version=CURRENT_PROTOCOL_VERSION):
     looper.loop.run_until_complete(set_protocol_version(version))
diff --git a/plenum/test/input_validation/message_validation/test_batch_committed.py b/plenum/test/input_validation/message_validation/test_batch_committed.py
index e3e9d5de4c..b3b4c098b3 100644
--- a/plenum/test/input_validation/message_validation/test_batch_committed.py
+++ b/plenum/test/input_validation/message_validation/test_batch_committed.py
@@ -7,7 +7,7 @@
     LedgerIdField, NonNegativeNumberField, MerkleRootField, TimestampField, LimitedLengthStringField
 from plenum.common.messages.node_messages import BatchCommitted
 from plenum.common.util import get_utc_epoch
-from plenum.test.helper import sdk_random_request_objects, generate_state_root
+from plenum.test.helper import vdr_random_request_objects, generate_state_root

 EXPECTED_ORDERED_FIELDS = OrderedDict([
     ("requests", IterableField),
@@ -30,7 +30,7 @@ def create_valid_batch_committed():
     reqs = [req.as_dict for req in
-            sdk_random_request_objects(10, identifier="1" * 16, protocol_version=CURRENT_PROTOCOL_VERSION)]
+            vdr_random_request_objects(10, identifier="1" * 16, protocol_version=CURRENT_PROTOCOL_VERSION)]
     return BatchCommitted(reqs,
                           DOMAIN_LEDGER_ID,
                           0,
diff --git a/plenum/test/input_validation/test_handle_one_node_message.py b/plenum/test/input_validation/test_handle_one_node_message.py
index 893579bf0c..d851e064c5 100644
--- a/plenum/test/input_validation/test_handle_one_node_message.py
+++ b/plenum/test/input_validation/test_handle_one_node_message.py
@@ -1,8 +1,8 @@
 from plenum.common.messages.node_messages import
Batch, InstanceChange -def test_unpack_node_msg_with_str_as_msg_in_batch(create_node_and_not_start): - node = create_node_and_not_start +def test_unpack_node_msg_with_str_as_msg_in_batch(vdr_create_node_and_not_start): + node = vdr_create_node_and_not_start while node.nodeInBox: node.nodeInBox.pop() batch = Batch(['pi', diff --git a/plenum/test/instances/test_msgs_from_slow_instances.py b/plenum/test/instances/test_msgs_from_slow_instances.py index 188326b91f..a830572283 100644 --- a/plenum/test/instances/test_msgs_from_slow_instances.py +++ b/plenum/test/instances/test_msgs_from_slow_instances.py @@ -1,5 +1,5 @@ import pytest -from plenum.test.helper import sdk_send_random_request +from plenum.test.helper import vdr_send_random_request from stp_core.loop.eventually import eventually from plenum.common.messages.node_messages import Commit @@ -26,10 +26,10 @@ def configNodeSet(txnPoolNodeSet): def testMsgFromInstanceDelay(configNodeSet, looper, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): A, B, C, D = configNodeSet - sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) + vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_client) def getCommits(node: TestNode, instId: int): replica = node.replicas[instId] # type: Replica diff --git a/plenum/test/ledger/conftest.py b/plenum/test/ledger/conftest.py index 5d622b2c77..19047d1ff3 100644 --- a/plenum/test/ledger/conftest.py +++ b/plenum/test/ledger/conftest.py @@ -2,22 +2,22 @@ from plenum.common.constants import DOMAIN_LEDGER_ID from plenum.common.txn_util import reqToTxn -from plenum.test.helper import sdk_signed_random_requests +from plenum.test.helper import vdr_signed_random_requests NUM_BATCHES = 3 TXNS_IN_BATCH = 5 def create_txns(looper, sdk_wallet_client, count=TXNS_IN_BATCH): - reqs = sdk_signed_random_requests(looper, sdk_wallet_client, count) + reqs = vdr_signed_random_requests(looper, sdk_wallet_client, count) return [reqToTxn(req) for req in reqs] @pytest.fixture(scope='module') -def created_txns(ledger, looper, sdk_wallet_client): +def created_txns(ledger, looper, vdr_wallet_client): txns = [] for i in range(NUM_BATCHES): - txns.append(create_txns(looper, sdk_wallet_client, TXNS_IN_BATCH)) + txns.append(create_txns(looper, vdr_wallet_client, TXNS_IN_BATCH)) return txns diff --git a/plenum/test/ledger/test_ledger_add_txns.py b/plenum/test/ledger/test_ledger_add_txns.py index 7f7ca58255..7da1662bd3 100644 --- a/plenum/test/ledger/test_ledger_add_txns.py +++ b/plenum/test/ledger/test_ledger_add_txns.py @@ -4,8 +4,8 @@ def test_append_seq_no(ledger, - looper, sdk_wallet_client): - txns = create_txns(looper, sdk_wallet_client) + looper, vdr_wallet_client): + txns = create_txns(looper, vdr_wallet_client) seq_no = 10 txns = ledger._append_seq_no(txns, seq_no) for txn in txns: @@ -14,8 +14,8 @@ def test_append_seq_no(ledger, def test_append_seq_no_when_adding(ledger, - looper, sdk_wallet_client): - txns = create_txns(looper, sdk_wallet_client) + looper, vdr_wallet_client): + txns = create_txns(looper, vdr_wallet_client) seq_no = ledger.seqNo for txn in txns: seq_no += 1 @@ -25,8 +25,8 @@ def test_append_seq_no_when_adding(ledger, def test_add_result(ledger, - looper, sdk_wallet_client): - txn = create_txns(looper, sdk_wallet_client)[0] + looper, vdr_wallet_client): + txn = create_txns(looper, vdr_wallet_client)[0] res = ledger.add(txn) assert F.seqNo.name not in res assert F.auditPath.name in res diff --git a/plenum/test/ledger/test_ledger_append_txns.py 
b/plenum/test/ledger/test_ledger_append_txns.py index 43028adbbe..c927444239 100644 --- a/plenum/test/ledger/test_ledger_append_txns.py +++ b/plenum/test/ledger/test_ledger_append_txns.py @@ -6,8 +6,8 @@ from plenum.test.ledger.conftest import NUM_BATCHES, TXNS_IN_BATCH, create_txns -def test_ledger_appendTxns_args(ledger, looper, sdk_wallet_client): - txns = create_txns(looper, sdk_wallet_client) +def test_ledger_appendTxns_args(ledger, looper, vdr_wallet_client): + txns = create_txns(looper, vdr_wallet_client) # None seq_no txns[0][TXN_METADATA][TXN_METADATA_SEQ_NO] = None diff --git a/plenum/test/ledger/test_ledger_append_txns_result.py b/plenum/test/ledger/test_ledger_append_txns_result.py index 581867cc82..c76e333cdb 100644 --- a/plenum/test/ledger/test_ledger_append_txns_result.py +++ b/plenum/test/ledger/test_ledger_append_txns_result.py @@ -10,16 +10,16 @@ def test_append_empty(ledger): def test_append_result(ledger, - looper, sdk_wallet_client): + looper, vdr_wallet_client): size = ledger.seqNo - txns1 = create_txns(looper, sdk_wallet_client) + txns1 = create_txns(looper, vdr_wallet_client) ledger.append_txns_metadata(txns1) (start, end), appended_txns = ledger.appendTxns(txns1) assert start == size + 1 assert end == size + TXNS_IN_BATCH assert len(appended_txns) == TXNS_IN_BATCH - txns2 = create_txns(looper, sdk_wallet_client) + txns2 = create_txns(looper, vdr_wallet_client) ledger.append_txns_metadata(txns2) (start, end), appended_txns = ledger.appendTxns(txns2) assert start == size + 1 + TXNS_IN_BATCH diff --git a/plenum/test/ledger/test_ledger_commit_txns.py b/plenum/test/ledger/test_ledger_commit_txns.py index 3cad9a7737..3924567efd 100644 --- a/plenum/test/ledger/test_ledger_commit_txns.py +++ b/plenum/test/ledger/test_ledger_commit_txns.py @@ -15,13 +15,13 @@ def test_commit_empty(ledger, inital_root_hash): def test_commit_txns(ledger, - looper, sdk_wallet_client): - txns1 = create_txns(looper, sdk_wallet_client) + looper, vdr_wallet_client): + txns1 = create_txns(looper, vdr_wallet_client) ledger.append_txns_metadata(txns1) ledger.appendTxns(txns1) root1 = ledger.uncommittedRootHash - txns2 = create_txns(looper, sdk_wallet_client) + txns2 = create_txns(looper, vdr_wallet_client) ledger.append_txns_metadata(txns2) ledger.appendTxns(txns2) root2 = ledger.uncommittedRootHash diff --git a/plenum/test/ledger/test_ledger_discard_txns.py b/plenum/test/ledger/test_ledger_discard_txns.py index 15ddc7d5ec..9a29eee775 100644 --- a/plenum/test/ledger/test_ledger_discard_txns.py +++ b/plenum/test/ledger/test_ledger_discard_txns.py @@ -22,13 +22,13 @@ def test_discard_empty_no_uncommitted(ledger): def test_discard_txns(ledger, - looper, sdk_wallet_client): - txns1 = create_txns(looper, sdk_wallet_client) + looper, vdr_wallet_client): + txns1 = create_txns(looper, vdr_wallet_client) ledger.append_txns_metadata(txns1) ledger.appendTxns(txns1) root1 = ledger.uncommittedRootHash - txns2 = create_txns(looper, sdk_wallet_client) + txns2 = create_txns(looper, vdr_wallet_client) ledger.append_txns_metadata(txns2) ledger.appendTxns(txns2) root2 = ledger.uncommittedRootHash diff --git a/plenum/test/ledger/test_ledger_get_last_txn.py b/plenum/test/ledger/test_ledger_get_last_txn.py index d1ce91772c..72c6151bf5 100644 --- a/plenum/test/ledger/test_ledger_get_last_txn.py +++ b/plenum/test/ledger/test_ledger_get_last_txn.py @@ -12,14 +12,14 @@ def ledger(txnPoolNodeSet): def test_get_last_txn(ledger, - looper, sdk_wallet_client): + looper, vdr_wallet_client): # add 2 txns uncommitted - txn1 = 
create_txns(looper, sdk_wallet_client, 1)[0] + txn1 = create_txns(looper, vdr_wallet_client, 1)[0] ledger.append_txns_metadata([txn1]) expected_txn1 = deepcopy(txn1) ledger.appendTxns([txn1]) - txn2 = create_txns(looper, sdk_wallet_client, 1)[0] + txn2 = create_txns(looper, vdr_wallet_client, 1)[0] ledger.append_txns_metadata([txn2]) expected_last_added_txn = deepcopy(txn2) expected_txn2 = deepcopy(txn2) diff --git a/plenum/test/logging/test_logging_txn_state.py b/plenum/test/logging/test_logging_txn_state.py index 09d47e5234..2b1c456bb5 100644 --- a/plenum/test/logging/test_logging_txn_state.py +++ b/plenum/test/logging/test_logging_txn_state.py @@ -11,11 +11,11 @@ from plenum.common.constants import DOMAIN_LEDGER_ID, STEWARD_STRING -from plenum.test.pool_transactions.helper import prepare_nym_request, \ - sdk_sign_and_send_prepared_request +from plenum.test.pool_transactions.helper import vdr_prepare_nym_request, \ + vdr_sign_and_send_prepared_request from plenum.test import waits -from plenum.test.helper import sdk_send_random_and_check, \ - sdk_get_and_check_replies, get_key_from_req +from plenum.test.helper import vdr_send_random_and_check, \ + vdr_get_and_check_replies, get_key_from_req from stp_core.common.log import Logger import logging @@ -31,14 +31,14 @@ def testLoggingTxnStateForValidRequest( looper, logsearch, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): logsPropagate, _ = logsearch(files=['propagator.py'], funcs=['propagate'], msgs=['propagating.*request.*from client']) logsOrdered, _ = logsearch(files=['ordering_service.py'], funcs=['_order_3pc_key'], msgs=['ordered batch request']) logsCommited, _ = logsearch(files=['node.py'], funcs=['executeBatch'], msgs=['committed batch request']) - reqs = sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + reqs = vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) req, _ = reqs[0] key = get_key_from_req(req) @@ -48,24 +48,24 @@ def testLoggingTxnStateForValidRequest( def testLoggingTxnStateForInvalidRequest( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, logsearch): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, logsearch): logsPropagate, _ = logsearch(files=['propagator.py'], funcs=['propagate'], msgs=['propagating.*request.*from client']) logsReject, _ = logsearch(files=['ordering_service.py'], funcs=['_consume_req_queue_for_pre_prepare'], msgs=['encountered exception.*while processing.*will reject']) seed = randomString(32) - wh, _ = sdk_wallet_client + wh, _ = vdr_wallet_client nym_request, _ = looper.loop.run_until_complete( - prepare_nym_request(sdk_wallet_client, seed, + vdr_prepare_nym_request(vdr_wallet_client, seed, "name", STEWARD_STRING)) - request_couple = sdk_sign_and_send_prepared_request(looper, sdk_wallet_client, - sdk_pool_handle, nym_request) + request_couple = vdr_sign_and_send_prepared_request(looper, vdr_wallet_client, + vdr_pool_handle, nym_request) with pytest.raises(RequestRejectedException) as e: - sdk_get_and_check_replies(looper, [request_couple]) + vdr_get_and_check_replies(looper, [request_couple]) assert 'Only Steward is allowed to do these transactions' in e._excinfo[1].args[0] request = request_couple[0] @@ -76,7 +76,7 @@ def testLoggingTxnStateForInvalidRequest( def testLoggingTxnStateWhenCommitFails( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, logsearch): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, 
logsearch): logsPropagate, _ = logsearch(files=['propagator.py'], funcs=['propagate'], msgs=['propagating.*request.*from client']) logsOrdered, _ = logsearch(files=['ordering_service.py'], funcs=['_order_3pc_key'], msgs=['ordered batch request']) @@ -84,14 +84,14 @@ def testLoggingTxnStateWhenCommitFails( msgs=['commit failed for batch request']) seed = randomString(32) - wh, _ = sdk_wallet_steward + wh, _ = vdr_wallet_steward nym_request, _ = looper.loop.run_until_complete( - prepare_nym_request(sdk_wallet_steward, seed, + vdr_prepare_nym_request(vdr_wallet_steward, seed, "name", None)) - req_couple = sdk_sign_and_send_prepared_request(looper, sdk_wallet_steward, - sdk_pool_handle, nym_request) + req_couple = vdr_sign_and_send_prepared_request(looper, vdr_wallet_steward, + vdr_pool_handle, nym_request) class SomeError(Exception): pass diff --git a/plenum/test/metrics/test_metrics_config.py b/plenum/test/metrics/test_metrics_config.py index f137651093..12ac81cd97 100644 --- a/plenum/test/metrics/test_metrics_config.py +++ b/plenum/test/metrics/test_metrics_config.py @@ -1,7 +1,7 @@ import pytest from plenum.common.metrics_collector import KvStoreMetricsFormat, MetricsName, TMP_METRIC -from plenum.test.helper import sdk_send_random_and_check, max_3pc_batch_limits, assertExp +from plenum.test.helper import vdr_send_random_and_check, max_3pc_batch_limits, assertExp from storage.helper import initKeyValueStorage from stp_core.loop.eventually import eventually @@ -18,13 +18,13 @@ def tconf(tconf): tconf.METRICS_FLUSH_INTERVAL = old_metrics_flush_interval -def test_kv_store_metrics_config(looper, txnPoolNodeSet, tdir, tconf, sdk_pool_handle, sdk_wallet_client): +def test_kv_store_metrics_config(looper, txnPoolNodeSet, tdir, tconf, vdr_pool_handle, vdr_wallet_client): total_time = 1.5 * tconf.PerfCheckFreq total_iters = 5 iter_time = total_time / total_iters for _ in range(total_iters): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 15) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 15) looper.runFor(iter_time) for node in txnPoolNodeSet: diff --git a/plenum/test/monitoring/conftest.py b/plenum/test/monitoring/conftest.py index c408b76b01..f853350878 100644 --- a/plenum/test/monitoring/conftest.py +++ b/plenum/test/monitoring/conftest.py @@ -5,16 +5,16 @@ from plenum.common.average_strategies import MedianLowStrategy from plenum.server.instances import Instances from plenum.server.monitor import Monitor -from plenum.test.helper import sdk_eval_timeout, sdk_send_random_request, sdk_get_reply +from plenum.test.helper import vdr_eval_timeout, vdr_send_random_request, vdr_get_reply from plenum.test.testing_utils import FakeSomething @pytest.fixture() -def requests(looper, sdk_wallet_client, sdk_pool_handle): +def requests(looper, vdr_wallet_client, vdr_pool_handle): requests = [] for i in range(5): - req = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) - req, _ = sdk_get_reply(looper, req, timeout=sdk_eval_timeout(1, 4)) + req = vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_client) + req, _ = vdr_get_reply(looper, req, timeout=vdr_eval_timeout(1, 4)) requests.append(req) return requests diff --git a/plenum/test/monitoring/test_avg_latency.py b/plenum/test/monitoring/test_avg_latency.py index 264a71ed25..e9131bfbf8 100644 --- a/plenum/test/monitoring/test_avg_latency.py +++ b/plenum/test/monitoring/test_avg_latency.py @@ -1,7 +1,7 @@ import pytest from stp_core.common.log import 
getlogger -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check nodeCount = 4 logger = getlogger() @@ -16,13 +16,13 @@ def tconf(tconf): tconf.MIN_LATENCY_COUNT = old_min_cnt @pytest.mark.skip(reason="Not used now") -def testAvgReqLatency(looper, tconf, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle): +def testAvgReqLatency(looper, tconf, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle): """ Checking if average latency is being set """ - _, wallet_did = sdk_wallet_client + _, wallet_did = vdr_wallet_client for i in range(txnCount): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) for node in txnPoolNodeSet: # type: Node mLat = node.monitor.getAvgLatencyForClient(wallet_did, diff --git a/plenum/test/monitoring/test_backup_throughput_measurement.py b/plenum/test/monitoring/test_backup_throughput_measurement.py index 2b4cf07762..c5973ad043 100644 --- a/plenum/test/monitoring/test_backup_throughput_measurement.py +++ b/plenum/test/monitoring/test_backup_throughput_measurement.py @@ -2,8 +2,8 @@ from plenum.common.average_strategies import MedianLowStrategy, MedianMediumStrategy from plenum.common.throughput_measurements import RevivalSpikeResistantEMAThroughputMeasurement -from plenum.test.helper import sdk_send_random_and_check -from plenum.test.pool_transactions.helper import sdk_pool_refresh +from plenum.test.helper import vdr_send_random_and_check +from plenum.test.pool_transactions.helper import vdr_pool_refresh from stp_core.loop.eventually import eventually nodeCount = 8 @@ -35,15 +35,15 @@ def tconf(tconf): tconf.throughput_measurement_params = old_throughput_measurement_params -def test_backup_throughput_measurement(looper, sdk_pool_handle, txnPoolNodeSet, - sdk_wallet_steward, tdir, tconf, allPluginsPath): +def test_backup_throughput_measurement(looper, vdr_pool_handle, txnPoolNodeSet, + vdr_wallet_steward, tdir, tconf, allPluginsPath): # 8 nodes, so f == 2 and replicas == 3 looper.runFor(tconf.throughput_measurement_params['window_size'] * tconf.throughput_measurement_params['min_cnt']) # Send some txns - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 1) # Stop backup primaries for node in txnPoolNodeSet[1:3]: @@ -52,8 +52,8 @@ def test_backup_throughput_measurement(looper, sdk_pool_handle, txnPoolNodeSet, node.stop() # Send more txns so that master replica got more throughput - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, int(2 / tconf.DELTA)) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, int(2 / tconf.DELTA)) def chk(): assert len(txnPoolNodeSet[0].monitor.areBackupsDegraded()) == 2 diff --git a/plenum/test/monitoring/test_instance_change_with_Delta.py b/plenum/test/monitoring/test_instance_change_with_Delta.py index 9ed992d12b..48e8a7f204 100644 --- a/plenum/test/monitoring/test_instance_change_with_Delta.py +++ b/plenum/test/monitoring/test_instance_change_with_Delta.py @@ -5,7 +5,7 @@ from stp_core.common.util import adict from plenum.server.node import Node from plenum.test import waits -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.malicious_behaviors_node import 
slow_primary from plenum.test.test_node import getPrimaryReplica from plenum.test.view_change.helper import provoke_and_wait_for_view_change @@ -66,7 +66,7 @@ def ensureAnotherPerfCheck(): @pytest.fixture(scope="module") -def step1(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): +def step1(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): startedNodes = txnPoolNodeSet """ stand up a pool of nodes and send 5 requests to client @@ -74,7 +74,7 @@ def step1(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): # the master instance has a primary replica, call it P P = getPrimaryReplica(startedNodes) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 5) # profile_this(sendReqsToNodesAndVerifySuffReplies, looper, client1, 5) return adict(P=P, @@ -116,11 +116,11 @@ def step3(step2): @pytest.mark.skip(reason="SOV-1123 - fails intermittently") -def testInstChangeWithLowerRatioThanDelta(looper, step3, sdk_pool_handle, sdk_wallet_client): +def testInstChangeWithLowerRatioThanDelta(looper, step3, vdr_pool_handle, vdr_wallet_client): # from plenum.test.test_node import ensureElectionsDone # ensureElectionsDone(looper, []) - sdk_send_random_and_check(looper, step3.nodes, sdk_pool_handle, sdk_wallet_client, 9) + vdr_send_random_and_check(looper, step3.nodes, vdr_pool_handle, vdr_wallet_client, 9) # wait for every node to run another checkPerformance waitForNextPerfCheck(looper, step3.nodes, step3.perfChecks) - provoke_and_wait_for_view_change(looper, step3.nodes, 1, sdk_pool_handle, sdk_wallet_client) + provoke_and_wait_for_view_change(looper, step3.nodes, 1, vdr_pool_handle, vdr_wallet_client) diff --git a/plenum/test/monitoring/test_instance_change_with_req_Lambda.py b/plenum/test/monitoring/test_instance_change_with_req_Lambda.py index 7517456527..7173f52117 100644 --- a/plenum/test/monitoring/test_instance_change_with_req_Lambda.py +++ b/plenum/test/monitoring/test_instance_change_with_req_Lambda.py @@ -1,7 +1,7 @@ import pytest from plenum.common.messages.node_messages import PrePrepare -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.helper import waitForViewChange from plenum.test.spy_helpers import getAllReturnVals from plenum.test.test_node import getPrimaryReplica @@ -37,9 +37,9 @@ def tconf(tconf): @pytest.fixture() -def setup(looper, tconf, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 5) +def setup(looper, tconf, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle): + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 5) P = getPrimaryReplica(txnPoolNodeSet) # set LAMBDA smaller than the production config to make the test faster @@ -65,8 +65,8 @@ def specificPrePrepare(msg): P.outBoxTestStasher.delay(specificPrePrepare) # TODO select or create a timeout for this case in 'waits' - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 5, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 5, customTimeoutPerReq=tconf.TestRunningTimeLimitSec) return adict(nodes=txnPoolNodeSet, old_view_no=old_view_no) diff --git a/plenum/test/monitoring/test_invalid_reqs_in_monitor.py b/plenum/test/monitoring/test_invalid_reqs_in_monitor.py index a648323dba..eaf4fcc657 
100644 --- a/plenum/test/monitoring/test_invalid_reqs_in_monitor.py +++ b/plenum/test/monitoring/test_invalid_reqs_in_monitor.py @@ -3,7 +3,7 @@ import pytest from plenum.common.exceptions import InvalidClientMessageException, RequestRejectedException -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check COUNT_VALID_REQS = 1 COUNT_INVALID_REQS = 2 @@ -23,21 +23,21 @@ def check_count_reqs(nodes): def test_invalid_reqs(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle): + vdr_wallet_steward, + vdr_pool_handle): """Send 1 valid request and 2 invalid. Then checked, that all 3 requests are stored into monitor.""" - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, COUNT_VALID_REQS) for node in txnPoolNodeSet: node.master_replica._ordering_service._do_dynamic_validation = \ functools.partial(randomDynamicValidation, node) with pytest.raises(RequestRejectedException, match='not valid req'): - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, COUNT_INVALID_REQS) check_count_reqs(txnPoolNodeSet) diff --git a/plenum/test/monitoring/test_no_check_if_no_new_requests.py b/plenum/test/monitoring/test_no_check_if_no_new_requests.py index 9cf7e2f188..5357ca8539 100644 --- a/plenum/test/monitoring/test_no_check_if_no_new_requests.py +++ b/plenum/test/monitoring/test_no_check_if_no_new_requests.py @@ -1,5 +1,5 @@ from plenum.test.view_change.conftest import perf_chk_patched -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check # Perf check is invoked manually in this test so prevent scheduled perf check # as it affects the result of subsequent manual perf checks @@ -9,7 +9,7 @@ def test_not_check_if_no_new_requests(perf_chk_patched, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): """ Checks that node does not do performance check if there were no new requests since previous check @@ -29,9 +29,9 @@ def test_not_check_if_no_new_requests(perf_chk_patched, looper, txnPoolNodeSet, # Send new request and check that after it nodes can do # performance check again num_requests = 1 - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, num_requests) for node in txnPoolNodeSet: assert node.checkPerformance() is not None diff --git a/plenum/test/monitoring/test_post_monitoring_stats.py b/plenum/test/monitoring/test_post_monitoring_stats.py index 00f6281743..bd91ba55c5 100644 --- a/plenum/test/monitoring/test_post_monitoring_stats.py +++ b/plenum/test/monitoring/test_post_monitoring_stats.py @@ -1,7 +1,7 @@ import pytest from plenum.server.monitor import Monitor -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data WIND_SIZE = 5 @@ -28,13 +28,13 @@ def testPostingThroughput(postingStatsEnabled, decreasedMonitoringTimeouts, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): config = decreasedMonitoringTimeouts reqCount = 10 - sdk_send_random_and_check(looper, + 
vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, reqCount) ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) looper.runFor(WIND_SIZE * MIN_CNT) @@ -56,13 +56,13 @@ def testPostingLatency(postingStatsEnabled, decreasedMonitoringTimeouts, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): config = decreasedMonitoringTimeouts reqCount = 10 - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, reqCount) for node in txnPoolNodeSet: diff --git a/plenum/test/monitoring/test_throughput.py b/plenum/test/monitoring/test_throughput.py index 4c6f7c1eaa..7011345f93 100644 --- a/plenum/test/monitoring/test_throughput.py +++ b/plenum/test/monitoring/test_throughput.py @@ -3,7 +3,7 @@ import pytest from stp_core.common.log import getlogger -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check nodeCount = 4 logger = getlogger() @@ -11,12 +11,12 @@ # noinspection PyIncorrectDocstring @pytest.mark.skip(reason="Duplicated in testThroughputThreshold") -def testThroughput(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle): +def testThroughput(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle): """ Checking if the throughput is being set """ for i in range(5): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) for node in txnPoolNodeSet: masterThroughput, avgBackupThroughput = node.monitor.getThroughputs( diff --git a/plenum/test/monitoring/test_warn_unordered_log_msg.py b/plenum/test/monitoring/test_warn_unordered_log_msg.py index 5821175cc1..66a258530d 100644 --- a/plenum/test/monitoring/test_warn_unordered_log_msg.py +++ b/plenum/test/monitoring/test_warn_unordered_log_msg.py @@ -3,7 +3,7 @@ from plenum.test.malicious_behaviors_node import delaysCommitProcessing from plenum.test.test_node import getNonPrimaryReplicas from stp_core.common.log import getlogger -from plenum.test.helper import sdk_send_random_requests +from plenum.test.helper import vdr_send_random_requests nodeCount = 4 logger = getlogger() @@ -29,10 +29,10 @@ def txnPoolNodeSet(txnPoolNodeSet): # noinspection PyIncorrectDocstring def test_working_has_no_warn_log_msg(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): clear_unordered_requests(*txnPoolNodeSet) - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 5) looper.runFor(1.2 * UNORDERED_CHECK_FREQ) assert all(len(node.monitor.unordered_requests) == 0 for node in txnPoolNodeSet) @@ -41,14 +41,14 @@ def test_working_has_no_warn_log_msg(looper, txnPoolNodeSet, # noinspection PyIncorrectDocstring def test_slow_node_has_warn_unordered_log_msg(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): clear_unordered_requests(*txnPoolNodeSet) slow_node = getNonPrimaryReplicas(txnPoolNodeSet, 0)[0].node delaysCommitProcessing(slow_node, delay=3 * UNORDERED_CHECK_FREQ) - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 5) looper.runFor(2 * 
UNORDERED_CHECK_FREQ) assert all(len(node.monitor.unordered_requests) == 0 for node in txnPoolNodeSet if node.name != slow_node.name) diff --git a/plenum/test/node/test_api.py b/plenum/test/node/test_api.py index 63e798ddfe..98e40a8620 100644 --- a/plenum/test/node/test_api.py +++ b/plenum/test/node/test_api.py @@ -8,15 +8,15 @@ from plenum.test.testing_utils import FakeSomething -def test_ledger_id_for_request_fails(test_node): +def test_ledger_id_for_request_fails(vdr_test_node): for r in (Request(operation={}), Request(operation={TXN_TYPE: None})): with pytest.raises(ValueError) as excinfo: - test_node.ledger_id_for_request(r) + vdr_test_node.ledger_id_for_request(r) assert "TXN_TYPE is not defined for request" in str(excinfo.value) -def test_seq_no_db_updates(test_node): - oldSize = test_node.seqNoDB.size +def test_seq_no_db_updates(vdr_test_node): + oldSize = vdr_test_node.seqNoDB.size test_txn = { TXN_PAYLOAD: { TXN_PAYLOAD_TYPE: "2", @@ -32,12 +32,12 @@ def test_seq_no_db_updates(test_node): TXN_VERSION: "1" } - test_node.postTxnFromCatchupAddedToLedger(2, test_txn, False) - assert oldSize == test_node.seqNoDB.size + vdr_test_node.postTxnFromCatchupAddedToLedger(2, test_txn, False) + assert oldSize == vdr_test_node.seqNoDB.size -def test_seq_no_db_updates_by_default(test_node): - oldSize = test_node.seqNoDB.size +def test_seq_no_db_updates_by_default(vdr_test_node): + oldSize = vdr_test_node.seqNoDB.size test_txn = { TXN_PAYLOAD: { TXN_PAYLOAD_TYPE: "2", @@ -54,49 +54,49 @@ def test_seq_no_db_updates_by_default(test_node): TXN_VERSION: "1" } - test_node.postTxnFromCatchupAddedToLedger(2, test_txn) - assert oldSize + 2 == test_node.seqNoDB.size + vdr_test_node.postTxnFromCatchupAddedToLedger(2, test_txn) + assert oldSize + 2 == vdr_test_node.seqNoDB.size -def test_send_message_without_inst_id_to_replica(test_node): - replica = test_node.replicas[0] +def test_send_message_without_inst_id_to_replica(vdr_test_node): + replica = vdr_test_node.replicas[0] frm = "frm" msg = FakeSomething() - test_node.sendToReplica(msg, frm, replica.instId) - assert len(test_node.replicas) > 1 - for r in test_node.replicas.values(): + vdr_test_node.sendToReplica(msg, frm, replica.instId) + assert len(vdr_test_node.replicas) > 1 + for r in vdr_test_node.replicas.values(): if r == replica: assert (msg, frm) in r.inBox else: assert (msg, frm) not in r.inBox -def test_send_message_to_one_replica(test_node): - replica = test_node.replicas[0] +def test_send_message_to_one_replica(vdr_test_node): + replica = vdr_test_node.replicas[0] frm = "frm" msg = FakeSomething(instId=replica.instId) - test_node.sendToReplica(msg, frm) - assert len(test_node.replicas) > 1 - for r in test_node.replicas.values(): + vdr_test_node.sendToReplica(msg, frm) + assert len(vdr_test_node.replicas) > 1 + for r in vdr_test_node.replicas.values(): if r == replica: assert (msg, frm) in r.inBox else: assert (msg, frm) not in r.inBox -def test_send_message_to_incorrect_replica(test_node): +def test_send_message_to_incorrect_replica(vdr_test_node): frm = "frm" msg = FakeSomething(instId=100000) - test_node.sendToReplica(msg, frm) - assert len(test_node.replicas) > 1 - for r in test_node.replicas.values(): + vdr_test_node.sendToReplica(msg, frm) + assert len(vdr_test_node.replicas) > 1 + for r in vdr_test_node.replicas.values(): assert (msg, frm) not in r.inBox -def test_send_message_for_all_without_inst_id(test_node): +def test_send_message_for_all_without_inst_id(vdr_test_node): frm = "frm" msg = FakeSomething() - test_node.sendToReplica(msg, 
frm) - assert len(test_node.replicas) > 1 - for r in test_node.replicas.values(): + vdr_test_node.sendToReplica(msg, frm) + assert len(vdr_test_node.replicas) > 1 + for r in vdr_test_node.replicas.values(): assert (msg, frm) in r.inBox \ No newline at end of file diff --git a/plenum/test/node_catchup/catchup_req/test_catchup_with_disconnected_node.py b/plenum/test/node_catchup/catchup_req/test_catchup_with_disconnected_node.py index 83c3975a7f..446b10ff2b 100644 --- a/plenum/test/node_catchup/catchup_req/test_catchup_with_disconnected_node.py +++ b/plenum/test/node_catchup/catchup_req/test_catchup_with_disconnected_node.py @@ -3,7 +3,7 @@ from plenum.test.logging.conftest import logsearch from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.test_node import checkNodesConnected from plenum.test.view_change.helper import start_stopped_node @@ -14,8 +14,8 @@ def test_catchup_with_disconnected_node(tdir, tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, allPluginsPath, logsearch): ''' @@ -41,8 +41,8 @@ def test_catchup_with_disconnected_node(tdir, tconf, looper.removeProdable(restarted_node) # Send more requests to active nodes - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, len(rest_nodes) * 3) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, len(rest_nodes) * 3) waitNodeDataEquality(looper, *rest_nodes) # Stop Gamma diff --git a/plenum/test/node_catchup/catchup_req/test_catchup_with_one_slow_node.py b/plenum/test/node_catchup/catchup_req/test_catchup_with_one_slow_node.py index cb37903b8a..b805def4c1 100644 --- a/plenum/test/node_catchup/catchup_req/test_catchup_with_one_slow_node.py +++ b/plenum/test/node_catchup/catchup_req/test_catchup_with_one_slow_node.py @@ -7,7 +7,7 @@ from plenum.test.logging.conftest import logsearch from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected -from plenum.test.helper import sdk_send_random_and_check, max_3pc_batch_limits, assertExp +from plenum.test.helper import vdr_send_random_and_check, max_3pc_batch_limits, assertExp from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.stasher import delay_rules from plenum.test.test_node import checkNodesConnected @@ -29,8 +29,8 @@ def tconf(tconf): def test_catchup_with_one_slow_node(tdir, tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, allPluginsPath, logsearch): ''' @@ -63,8 +63,8 @@ def test_catchup_with_one_slow_node(tdir, tconf, looper.removeProdable(lagging_node) # Send more requests to active nodes - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, len(non_lagging_nodes) * 3) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, len(non_lagging_nodes) * 3) waitNodeDataEquality(looper, *non_lagging_nodes) # Stop Gamma diff --git a/plenum/test/node_catchup/conftest.py b/plenum/test/node_catchup/conftest.py index 791ad04ce4..d05d1b6224 100644 --- a/plenum/test/node_catchup/conftest.py +++ b/plenum/test/node_catchup/conftest.py @@ -5,11 +5,11 @@ from stp_core.common.log import getlogger from plenum.common.util import randomString from 
plenum.test.conftest import getValueFromModule -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality, \ check_last_3pc_master from plenum.test.pool_transactions.helper import \ - sdk_add_new_steward_and_node, sdk_pool_refresh + vdr_add_new_steward_and_node, vdr_pool_refresh from plenum.test.test_node import checkNodesConnected, getNonPrimaryReplicas @@ -22,37 +22,37 @@ def whitelist(): @pytest.fixture(scope="module") def sdk_node_created_after_some_txns_not_started(looper, testNodeClass, do_post_node_creation, - sdk_pool_handle, sdk_wallet_client, sdk_wallet_steward, + vdr_pool_handle, vdr_wallet_client, vdr_wallet_steward, txnPoolNodeSet, tdir, tconf, allPluginsPath, request): txnCount = getValueFromModule(request, "txnCount", 5) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, txnCount) new_steward_name = randomString() new_node_name = "Epsilon" - new_steward_wallet_handle, new_node = sdk_add_new_steward_and_node( - looper, sdk_pool_handle, sdk_wallet_steward, + new_steward_wallet_handle, new_node = vdr_add_new_steward_and_node( + looper, vdr_pool_handle, vdr_wallet_steward, new_steward_name, new_node_name, tdir, tconf, nodeClass=testNodeClass, allPluginsPath=allPluginsPath, autoStart=False, do_post_node_creation=do_post_node_creation) - sdk_pool_refresh(looper, sdk_pool_handle) - yield looper, new_node, sdk_pool_handle, new_steward_wallet_handle + vdr_pool_refresh(looper, vdr_pool_handle) + yield looper, new_node, vdr_pool_handle, new_steward_wallet_handle @pytest.fixture(scope="module") def poolAfterSomeTxns( looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, request): txnCount = getValueFromModule(request, "txnCount", 5) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, txnCount) - yield looper, sdk_pool_handle, sdk_wallet_client + yield looper, vdr_pool_handle, vdr_wallet_client @pytest.fixture diff --git a/plenum/test/node_catchup/test_build_ledger_status.py b/plenum/test/node_catchup/test_build_ledger_status.py index dfaf6bc62c..1281ee0880 100644 --- a/plenum/test/node_catchup/test_build_ledger_status.py +++ b/plenum/test/node_catchup/test_build_ledger_status.py @@ -1,5 +1,5 @@ from plenum.common.constants import POOL_LEDGER_ID, DOMAIN_LEDGER_ID, CONFIG_LEDGER_ID, CURRENT_PROTOCOL_VERSION -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.view_change.helper import ensure_view_change_complete @@ -35,17 +35,17 @@ def test_ledger_status_for_new_pool(txnPoolNodeSet): check_ledger_statuses(txnPoolNodeSet) -def test_ledger_status_after_txn_ordered(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle): +def test_ledger_status_after_txn_ordered(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle): # we expect last ordered 3PC is not None for Domain ledger only, as there is a txn added to Domain ledger - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) 
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) check_ledger_statuses(txnPoolNodeSet) -def test_ledger_status_after_catchup(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle): +def test_ledger_status_after_catchup(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle): # we expect last ordered 3PC is not None for Domain ledger only, as there is a txn added to Domain ledger - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) ensure_view_change_complete(looper, txnPoolNodeSet) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) @@ -53,10 +53,10 @@ def test_ledger_status_after_catchup(looper, txnPoolNodeSet, sdk_wallet_client, check_ledger_statuses(txnPoolNodeSet) -def test_ledger_status_for_new_node(looper, txnPoolNodeSet, sdk_node_created_after_some_txns): - _, new_node, sdk_pool_handle, new_steward_wallet_handle = sdk_node_created_after_some_txns - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, new_steward_wallet_handle, 1) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, new_steward_wallet_handle, 1) +def test_ledger_status_for_new_node(looper, txnPoolNodeSet, vdr_node_created_after_some_txns): + _, new_node, sdk_pool_handle, new_steward_wallet_handle = vdr_node_created_after_some_txns + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, new_steward_wallet_handle, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, new_steward_wallet_handle, 1) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet + [new_node], exclude_from_check=['check_last_ordered_3pc_backup']) diff --git a/plenum/test/node_catchup/test_catchup_delayed_nodes.py b/plenum/test/node_catchup/test_catchup_delayed_nodes.py index 28f4df7141..ae92871258 100644 --- a/plenum/test/node_catchup/test_catchup_delayed_nodes.py +++ b/plenum/test/node_catchup/test_catchup_delayed_nodes.py @@ -4,10 +4,10 @@ from plenum.test import waits from plenum.test.delayers import cpDelay -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality -from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node, \ - sdk_pool_refresh +from plenum.test.pool_transactions.helper import vdr_add_new_steward_and_node, \ + vdr_pool_refresh from plenum.test.test_node import checkNodesConnected logger = getlogger() @@ -20,7 +20,7 @@ @pytest.mark.skip(reason="SOV-551. 
Incomplete implementation") def testCatchupDelayedNodes(txnPoolNodeSet, sdk_node_set_with_node_added_after_some_txns, - sdk_wallet_steward, + vdr_wallet_steward, txnPoolCliNodeReg, tdirWithPoolTxns, tconf, tdir, allPluginsPath): @@ -42,9 +42,9 @@ def testCatchupDelayedNodes(txnPoolNodeSet, nodeZName = "Theta" delayX = 45 delayY = 2 - stewardX, nodeX = sdk_add_new_steward_and_node(looper, + stewardX, nodeX = vdr_add_new_steward_and_node(looper, sdk_pool_handle, - sdk_wallet_steward, + vdr_wallet_steward, stewardXName, nodeXName, tdir, @@ -52,9 +52,9 @@ def testCatchupDelayedNodes(txnPoolNodeSet, autoStart=False, allPluginsPath=allPluginsPath) - stewardY, nodeY = sdk_add_new_steward_and_node(looper, + stewardY, nodeY = vdr_add_new_steward_and_node(looper, sdk_pool_handle, - sdk_wallet_steward, + vdr_wallet_steward, stewardYName, nodeYName, tdir, @@ -76,9 +76,9 @@ def testCatchupDelayedNodes(txnPoolNodeSet, nodeX.stop() nodeY.stop() logger.debug("Sending requests") - sdk_pool_refresh(looper, sdk_pool_handle) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 50) + vdr_pool_refresh(looper, sdk_pool_handle) + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_wallet_steward, 50) logger.debug("Starting the 2 stopped nodes, {} and {}".format(nodeX.name, nodeY.name)) nodeX.start(looper.loop) diff --git a/plenum/test/node_catchup/test_catchup_demoted.py b/plenum/test/node_catchup/test_catchup_demoted.py index a252811898..dcf888be38 100644 --- a/plenum/test/node_catchup/test_catchup_demoted.py +++ b/plenum/test/node_catchup/test_catchup_demoted.py @@ -3,11 +3,11 @@ from plenum.common.util import hexToFriendly from plenum.common.constants import VALIDATOR -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality, \ checkNodeDataForInequality from plenum.test.pool_transactions.helper import \ - sdk_send_update_node + vdr_send_update_node from stp_core.common.log import getlogger from plenum.test.node_catchup.conftest import whitelist @@ -19,7 +19,7 @@ def test_catch_up_after_demoted( txnPoolNodeSet, sdk_node_set_with_node_added_after_some_txns, - sdk_wallet_client): + vdr_wallet_client): logger.info( "1. add a new node after sending some txns and check that catch-up " "is done (the new node is up to date)") @@ -29,7 +29,7 @@ def test_catch_up_after_demoted( logger.info("2. turn the new node off (demote)") node_dest = hexToFriendly(new_node.nodestack.verhex) - sdk_send_update_node(looper, new_steward_wallet_handle, + vdr_send_update_node(looper, new_steward_wallet_handle, sdk_pool_handle, node_dest, new_node.name, None, None, @@ -38,12 +38,12 @@ def test_catch_up_after_demoted( logger.info("3. send more requests, " "so that the new node's state is outdated") - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_wallet_client, 5) checkNodeDataForInequality(new_node, *txnPoolNodeSet[:-1]) logger.info("4. turn the new node on") - sdk_send_update_node(looper, new_steward_wallet_handle, + vdr_send_update_node(looper, new_steward_wallet_handle, sdk_pool_handle, node_dest, new_node.name, None, None, @@ -56,6 +56,6 @@ def test_catch_up_after_demoted( logger.info("6. 
send more requests and make sure " "that the new node participates in processing them") - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, new_steward_wallet_handle, 10) waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1]) diff --git a/plenum/test/node_catchup/test_catchup_f_plus_one.py b/plenum/test/node_catchup/test_catchup_f_plus_one.py index a27776a814..18c606e206 100644 --- a/plenum/test/node_catchup/test_catchup_f_plus_one.py +++ b/plenum/test/node_catchup/test_catchup_f_plus_one.py @@ -1,6 +1,6 @@ from stp_core.common.log import getlogger from plenum.common.config_helper import PNodeConfigHelper -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality, \ waitNodeDataInequality, checkNodeDataForEquality from plenum.test.pool_transactions.helper import \ @@ -16,8 +16,8 @@ def testNodeCatchupFPlusOne(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tconf, tdir, tdirWithPoolTxns, allPluginsPath, testNodeClass): """ @@ -36,8 +36,8 @@ def testNodeCatchupFPlusOne(looper, looper.removeProdable(node0) logger.debug("Sending requests") - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 5) logger.debug("Stopping node1 with pool ledger size {}". format(node1.poolManager.txnSeqNo)) @@ -67,7 +67,7 @@ def testNodeCatchupFPlusOne(looper, exclude_from_check=['check_last_ordered_3pc_backup']) logger.debug("Sending more requests") - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 2) checkNodeDataForEquality(node0, *txnPoolNodeSet[:-2], exclude_from_check=['check_last_ordered_3pc_backup']) diff --git a/plenum/test/node_catchup/test_catchup_from_unequal_nodes_without_reasking.py b/plenum/test/node_catchup/test_catchup_from_unequal_nodes_without_reasking.py index 8d8d06b7df..784b5bf0a1 100644 --- a/plenum/test/node_catchup/test_catchup_from_unequal_nodes_without_reasking.py +++ b/plenum/test/node_catchup/test_catchup_from_unequal_nodes_without_reasking.py @@ -3,7 +3,7 @@ from plenum.common.constants import AUDIT_LEDGER_ID from plenum.server.catchup.node_leecher_service import NodeLeecherService from plenum.test.delayers import delay_3pc, lsDelay -from plenum.test.helper import sdk_send_random_and_check, max_3pc_batch_limits, assertExp +from plenum.test.helper import vdr_send_random_and_check, max_3pc_batch_limits, assertExp from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules_without_processing, delay_rules from stp_core.loop.eventually import eventually @@ -32,8 +32,8 @@ def tconf(tconf): def test_catchup_from_unequal_nodes_without_reasking(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): lagged_node_1 = txnPoolNodeSet[-1] lagged_node_2 = txnPoolNodeSet[-2] normal_nodes = [node for node in txnPoolNodeSet @@ -41,10 +41,10 @@ def test_catchup_from_unequal_nodes_without_reasking(looper, normal_stashers = [node.nodeIbStasher for node in normal_nodes] with delay_rules_without_processing(lagged_node_1.nodeIbStasher, delay_3pc()): - 
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 2) with delay_rules_without_processing(lagged_node_2.nodeIbStasher, delay_3pc()): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 7) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 7) ensure_all_nodes_have_same_data(looper, normal_nodes, custom_timeout=30) # Perform catchup, while making sure that cons proof from lagging node is received diff --git a/plenum/test/node_catchup/test_catchup_from_unequal_nodes_without_waiting.py b/plenum/test/node_catchup/test_catchup_from_unequal_nodes_without_waiting.py index 41f0d33ebd..0f09c80e69 100644 --- a/plenum/test/node_catchup/test_catchup_from_unequal_nodes_without_waiting.py +++ b/plenum/test/node_catchup/test_catchup_from_unequal_nodes_without_waiting.py @@ -3,8 +3,8 @@ from plenum.common.messages.node_messages import Commit from plenum.server.catchup.node_leecher_service import NodeLeecherService from plenum.test.delayers import delay_3pc -from plenum.test.helper import sdk_send_random_and_check, max_3pc_batch_limits, assert_eq, sdk_send_random_requests, \ - sdk_get_replies, sdk_get_and_check_replies +from plenum.test.helper import vdr_send_random_and_check, max_3pc_batch_limits, assert_eq, vdr_send_random_requests, \ + vdr_get_replies, vdr_get_and_check_replies from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules_without_processing, delay_rules from stp_core.loop.eventually import eventually @@ -24,15 +24,15 @@ def tconf(tconf): def test_catchup_from_unequal_nodes_without_waiting(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): normal_node = txnPoolNodeSet[0] lagging_node_1 = txnPoolNodeSet[1] lagging_node_2 = txnPoolNodeSet[2] stopped_node = txnPoolNodeSet[3] # Make sure everyone have one batch - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) # Wait until all nodes have same data and store last 3PC number of node that's going to be "stopped" ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=30) @@ -40,16 +40,16 @@ def test_catchup_from_unequal_nodes_without_waiting(looper, with delay_rules_without_processing(stopped_node.nodeIbStasher, delay_3pc()): # Create one more batch on all nodes except "stopped" node - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) with delay_rules(lagging_node_1.nodeIbStasher, delay_3pc(msgs=Commit)): # Create one more batch on all nodes except "stopped" and first lagging node - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) with delay_rules(lagging_node_2.nodeIbStasher, delay_3pc(msgs=Commit)): # Create one more batch on all nodes except "stopped" and both lagging nodes # This time we can't wait for replies because there will be only one - reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) + reqs = vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1) # Wait until normal node orders txn 
looper.run(eventually(lambda: assert_eq(normal_node.master_last_ordered_3PC[1], @@ -71,4 +71,4 @@ def test_catchup_from_unequal_nodes_without_waiting(looper, assert stopped_node.master_last_ordered_3PC[0] == last_3pc[0] # Make sure replies from last request are eventually received - sdk_get_and_check_replies(looper, reqs) + vdr_get_and_check_replies(looper, reqs) diff --git a/plenum/test/node_catchup/test_catchup_inlcuding_3PC.py b/plenum/test/node_catchup/test_catchup_inlcuding_3PC.py index 905b350d35..4957717959 100644 --- a/plenum/test/node_catchup/test_catchup_inlcuding_3PC.py +++ b/plenum/test/node_catchup/test_catchup_inlcuding_3PC.py @@ -2,7 +2,7 @@ from plenum.common.constants import DOMAIN_LEDGER_ID from plenum.common.util import check_if_all_equal_in_list -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import check_last_3pc_master, \ waitNodeDataEquality from plenum.test.test_node import ensureElectionsDone @@ -33,18 +33,18 @@ def reset(): @pytest.fixture(scope="module") -def pre_check(tconf, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): +def pre_check(tconf, looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): # TODO: Maybe this needs to be extracted in another fixture for i in range(tconf.ProcessedBatchMapsToKeep - 1): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) # All node maintain the same map from txn range to 3PC looper.run(eventually(chk_if_equal_txn_to_3pc, txnPoolNodeSet)) for i in range(3): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) # All node maintain the same map from txn range to 3PC and its equal to # `tconf.ProcessedBatchMapsToKeep` even after sending more batches than @@ -56,9 +56,9 @@ def pre_check(tconf, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client) @pytest.mark.skip('Functionality removed in INDY-1946') def test_nodes_maintain_master_txn_3PC_map(looper, txnPoolNodeSet, pre_check, - sdk_node_created_after_some_txns): + vdr_node_created_after_some_txns): _, new_node, sdk_pool_handle, new_steward_wallet_handle = \ - sdk_node_created_after_some_txns + vdr_node_created_after_some_txns txnPoolNodeSet.append(new_node) waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4], @@ -71,6 +71,6 @@ def test_nodes_maintain_master_txn_3PC_map(looper, txnPoolNodeSet, pre_check, nodes=txnPoolNodeSet) # Requests still processed - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, new_steward_wallet_handle, 2) waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4]) diff --git a/plenum/test/node_catchup/test_catchup_not_triggered_if_another_in_progress.py b/plenum/test/node_catchup/test_catchup_not_triggered_if_another_in_progress.py index 586b3b3b3d..e0c5f6d3f1 100644 --- a/plenum/test/node_catchup/test_catchup_not_triggered_if_another_in_progress.py +++ b/plenum/test/node_catchup/test_catchup_not_triggered_if_another_in_progress.py @@ -27,8 +27,8 @@ def test_catchup_not_triggered_if_another_in_progress( chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, broken_node_and_others): """ A node 
misses 3pc messages and checkpoints during some period but later it @@ -42,8 +42,8 @@ def test_catchup_not_triggered_if_another_in_progress( logger.info("Step 1: The node misses quite a lot of 3PC-messages and checkpoints") send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, reqs_for_checkpoint + max_batch_size) waitNodeDataInequality(looper, broken_node, *other_nodes) @@ -59,8 +59,8 @@ def test_catchup_not_triggered_if_another_in_progress( with delay_rules(repaired_node.nodeIbStasher, cr_delay()): send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * reqs_for_checkpoint - max_batch_size) @@ -81,8 +81,8 @@ def test_catchup_not_triggered_if_another_in_progress( repaired_node.master_replica._checkpointer.spylog.count(CheckpointService.process_checkpoint) send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * reqs_for_checkpoint) diff --git a/plenum/test/node_catchup/test_catchup_reasking.py b/plenum/test/node_catchup/test_catchup_reasking.py index 78c2e46d41..548e1983e8 100644 --- a/plenum/test/node_catchup/test_catchup_reasking.py +++ b/plenum/test/node_catchup/test_catchup_reasking.py @@ -3,7 +3,7 @@ from plenum.common.constants import LEDGER_STATUS, COMMIT from plenum.common.messages.node_messages import MessageRep, ConsistencyProof from plenum.test.delayers import delay_3pc, lsDelay, msg_rep_delay, cpDelay -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.stasher import delay_rules_without_processing from stp_core.loop.eventually import eventually @@ -21,8 +21,8 @@ def tconf(tconf): def test_catchup_with_reask_ls(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tconf, tdir, allPluginsPath): @@ -32,14 +32,14 @@ def test_catchup_with_reask_ls(txnPoolNodeSet, Check that the catchup finished ''' lagged_node = txnPoolNodeSet[-1] - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 5) lagged_node.nodeIbStasher.delay(msg_rep_delay(types_to_delay=[COMMIT])) with delay_rules_without_processing(lagged_node.nodeIbStasher, delay_3pc(), msg_rep_delay(types_to_delay=[COMMIT])): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 2) lagged_node.nodeIbStasher.drop_delayeds() with delay_rules_without_processing(lagged_node.nodeIbStasher, @@ -62,8 +62,8 @@ def chk(): def test_catchup_with_reask_cp(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tconf, tdir, allPluginsPath): @@ -73,12 +73,12 @@ def test_catchup_with_reask_cp(txnPoolNodeSet, Check that the catchup finished ''' lagged_node = txnPoolNodeSet[-1] - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 5) with 
delay_rules_without_processing(lagged_node.nodeIbStasher, delay_3pc(), msg_rep_delay(types_to_delay=[COMMIT])): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 2) lagged_node.nodeIbStasher.drop_delayeds() diff --git a/plenum/test/node_catchup/test_catchup_uses_only_nodes_with_cons_proofs.py b/plenum/test/node_catchup/test_catchup_uses_only_nodes_with_cons_proofs.py index 470ea412ef..9824ce70b4 100644 --- a/plenum/test/node_catchup/test_catchup_uses_only_nodes_with_cons_proofs.py +++ b/plenum/test/node_catchup/test_catchup_uses_only_nodes_with_cons_proofs.py @@ -2,7 +2,7 @@ from plenum.common.constants import AUDIT_LEDGER_ID from plenum.test.delayers import delay_3pc, cqDelay -from plenum.test.helper import sdk_send_random_and_check, max_3pc_batch_limits, assert_eq +from plenum.test.helper import vdr_send_random_and_check, max_3pc_batch_limits, assert_eq from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import start_delaying, stop_delaying_and_process from stp_core.loop.eventually import eventually @@ -31,13 +31,13 @@ def tconf(tconf): def test_catchup_uses_only_nodes_with_cons_proofs(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): lagging_node = txnPoolNodeSet[-1] other_nodes = txnPoolNodeSet[:-1] start_delaying(lagging_node.nodeIbStasher, delay_3pc()) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 10) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 10) catchup_reqs = {node.name: start_delaying(node.nodeIbStasher, cqDelay()) for node in other_nodes} diff --git a/plenum/test/node_catchup/test_catchup_with_all_nodes_sending_cons_proofs_dead.py b/plenum/test/node_catchup/test_catchup_with_all_nodes_sending_cons_proofs_dead.py index c0105c69e3..d0c4887416 100644 --- a/plenum/test/node_catchup/test_catchup_with_all_nodes_sending_cons_proofs_dead.py +++ b/plenum/test/node_catchup/test_catchup_with_all_nodes_sending_cons_proofs_dead.py @@ -4,7 +4,7 @@ from plenum.common.constants import AUDIT_LEDGER_ID from plenum.test.delayers import delay_3pc, cqDelay -from plenum.test.helper import sdk_send_random_and_check, max_3pc_batch_limits, assert_eq +from plenum.test.helper import vdr_send_random_and_check, max_3pc_batch_limits, assert_eq from plenum.test.logging.conftest import logsearch from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import start_delaying, stop_delaying_and_process @@ -33,14 +33,14 @@ def tconf(tconf): def test_catchup_with_all_nodes_sending_cons_proofs_dead(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, logsearch): lagging_node = txnPoolNodeSet[-1] other_nodes = txnPoolNodeSet[:-1] start_delaying(lagging_node.nodeIbStasher, delay_3pc()) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 10) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 10) log_re_ask, _ = logsearch(msgs=['requesting .* missing transactions after timeout']) old_re_ask_count = len(log_re_ask) diff --git a/plenum/test/node_catchup/test_catchup_with_ledger_statuses_in_old_format.py b/plenum/test/node_catchup/test_catchup_with_ledger_statuses_in_old_format.py index 3672e5b0ce..974cbdab67 100644 --- 
a/plenum/test/node_catchup/test_catchup_with_ledger_statuses_in_old_format.py +++ b/plenum/test/node_catchup/test_catchup_with_ledger_statuses_in_old_format.py @@ -2,7 +2,7 @@ from plenum.common.messages.fields import LedgerIdField, NonNegativeNumberField, \ MerkleRootField from plenum.common.messages.message_base import MessageBase -from plenum.test.helper import sdk_send_random_and_check, countDiscarded +from plenum.test.helper import vdr_send_random_and_check, countDiscarded from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.node_catchup.test_config_ledger import start_stopped_node from plenum.test.pool_transactions.helper import \ @@ -12,7 +12,7 @@ def test_catchup_with_ledger_statuses_in_old_format_from_one_node( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_steward, + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_steward, tconf, tdir, allPluginsPath): """ A node is restarted and during a catch-up receives ledger statuses @@ -25,8 +25,8 @@ def test_catchup_with_ledger_statuses_in_old_format_from_one_node( old_node = txnPoolNodeSet[0] - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 5) original_get_ledger_status = old_node.getLedgerStatus @@ -49,8 +49,8 @@ def get_ledger_status_without_protocol_version(ledgerId: int): txnPoolNodeSet, node_to_restart) looper.removeProdable(name=node_to_restart.name) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 2) # add `node_to_restart` to pool @@ -69,8 +69,8 @@ def get_ledger_status_without_protocol_version(ledgerId: int): # Verify that `node_to_restart` participates in ordering # of further transactions - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 5) waitNodeDataEquality(looper, node_to_restart, *other_nodes) diff --git a/plenum/test/node_catchup/test_catchup_with_old_txn_metadata_digest_format.py b/plenum/test/node_catchup/test_catchup_with_old_txn_metadata_digest_format.py index da44f5fb84..660e189c51 100644 --- a/plenum/test/node_catchup/test_catchup_with_old_txn_metadata_digest_format.py +++ b/plenum/test/node_catchup/test_catchup_with_old_txn_metadata_digest_format.py @@ -5,7 +5,7 @@ TXN_PAYLOAD_METADATA_PAYLOAD_DIGEST from plenum.common.txn_util import append_payload_metadata from plenum.test.delayers import delay_3pc -from plenum.test.helper import sdk_send_random_requests, sdk_get_replies +from plenum.test.helper import vdr_send_random_requests, vdr_get_replies from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules_without_processing from stp_core.loop.eventually import eventually @@ -18,8 +18,8 @@ def test_catchup_with_old_txn_metadata_digest_format(tdir, tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, monkeypatch): lagging_node = txnPoolNodeSet[-1] lagging_stasher = lagging_node.nodeIbStasher @@ -50,7 +50,7 @@ def append_old_payload_metadata( # Order some transactions, with one node discarding messages with delay_rules_without_processing(lagging_stasher, delay_3pc()): - reps = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 10) + reps = 
vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 10) looper.run(eventually(check_nodes_domain_ledger, other_nodes, initial_size + 10)) assert lagging_node.domainLedger.size == initial_size @@ -59,4 +59,4 @@ def append_old_payload_metadata( ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) # Catch replies - sdk_get_replies(looper, reps) + vdr_get_replies(looper, reps) diff --git a/plenum/test/node_catchup/test_catchup_with_only_one_available_node.py b/plenum/test/node_catchup/test_catchup_with_only_one_available_node.py index 66ee20c90f..fa268371fa 100644 --- a/plenum/test/node_catchup/test_catchup_with_only_one_available_node.py +++ b/plenum/test/node_catchup/test_catchup_with_only_one_available_node.py @@ -4,7 +4,7 @@ from plenum.common.constants import AUDIT_LEDGER_ID from plenum.test.delayers import delay_3pc, cqDelay -from plenum.test.helper import sdk_send_random_and_check, max_3pc_batch_limits, assert_eq +from plenum.test.helper import vdr_send_random_and_check, max_3pc_batch_limits, assert_eq from plenum.test.logging.conftest import logsearch from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import start_delaying, stop_delaying_and_process @@ -33,14 +33,14 @@ def tconf(tconf): def test_catchup_with_only_one_available_node(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, logsearch): lagging_node = txnPoolNodeSet[-1] other_nodes = txnPoolNodeSet[:-1] start_delaying(lagging_node.nodeIbStasher, delay_3pc()) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 10) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 10) log_re_ask, _ = logsearch(msgs=['requesting .* missing transactions after timeout']) old_re_ask_count = len(log_re_ask) diff --git a/plenum/test/node_catchup/test_config_ledger.py b/plenum/test/node_catchup/test_config_ledger.py index ed61bb3767..e22a66399f 100644 --- a/plenum/test/node_catchup/test_config_ledger.py +++ b/plenum/test/node_catchup/test_config_ledger.py @@ -8,11 +8,11 @@ from plenum.test.node_catchup.helper import \ waitNodeDataEquality from plenum.test.pool_transactions.helper import \ - disconnect_node_and_ensure_disconnected, sdk_pool_refresh, sdk_add_new_steward_and_node + disconnect_node_and_ensure_disconnected, vdr_pool_refresh, vdr_add_new_steward_and_node from plenum.common.util import randomString -from plenum.test.helper import sdk_gen_request, sdk_sign_request_objects, \ - sdk_send_signed_requests, sdk_get_replies, sdk_get_and_check_replies, sdk_send_random_and_check +from plenum.test.helper import vdr_gen_request, vdr_sign_request_objects, \ + vdr_send_signed_requests, vdr_get_replies, vdr_get_and_check_replies, vdr_send_random_and_check from plenum.common.constants import CONFIG_LEDGER_ID, DATA from plenum.test.test_config_req_handler import write_conf_op, \ @@ -23,20 +23,20 @@ def write(key, val, looper, sdk_pool_handle, sdk_wallet): _, idr = sdk_wallet - reqs_obj = [sdk_gen_request(op, identifier=idr) + reqs_obj = [vdr_gen_request(op, identifier=idr) for op in [write_conf_op(key, val)]] - reqs = sdk_sign_request_objects(looper, sdk_wallet, reqs_obj) - sent_reqs = sdk_send_signed_requests(sdk_pool_handle, reqs) - sdk_get_and_check_replies(looper, sent_reqs, timeout=10) + reqs = vdr_sign_request_objects(looper, sdk_wallet, reqs_obj) + sent_reqs = vdr_send_signed_requests(sdk_pool_handle, reqs, looper) + vdr_get_and_check_replies(looper, 
sent_reqs, timeout=10) def read(key, looper, sdk_pool_handle, sdk_wallet): _, idr = sdk_wallet - reqs_obj = [sdk_gen_request(op, identifier=idr) + reqs_obj = [vdr_gen_request(op, identifier=idr) for op in [read_conf_op(key)]] - reqs = sdk_sign_request_objects(looper, sdk_wallet, reqs_obj) - sent_reqs = sdk_send_signed_requests(sdk_pool_handle, reqs) - (req, resp), = sdk_get_and_check_replies(looper, sent_reqs, timeout=10) + reqs = vdr_sign_request_objects(looper, sdk_wallet, reqs_obj) + sent_reqs = vdr_send_signed_requests(sdk_pool_handle, reqs, looper) + (req, resp), = vdr_get_and_check_replies(looper, sent_reqs, timeout=10) return json.loads(resp['result'][DATA])[key] @@ -55,7 +55,7 @@ def testNodeBootstrapClass(): @pytest.fixture(scope="module") def sdk_node_created_after_some_txns(looper, testNodeClass, do_post_node_creation, - sdk_pool_handle, sdk_wallet_client, sdk_wallet_steward, + vdr_pool_handle, vdr_wallet_client, vdr_wallet_steward, txnPoolNodeSet, tdir, tconf, allPluginsPath, request, setup): def post_node_creation(node): write_rh = WriteConfHandler(node.db_manager) @@ -70,19 +70,19 @@ def post_node_creation(node): return node txnCount = getValueFromModule(request, "txnCount", 5) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, txnCount) new_steward_name = randomString() new_node_name = "Epsilon" - new_steward_wallet_handle, new_node = sdk_add_new_steward_and_node( - looper, sdk_pool_handle, sdk_wallet_steward, + new_steward_wallet_handle, new_node = vdr_add_new_steward_and_node( + looper, vdr_pool_handle, vdr_wallet_steward, new_steward_name, new_node_name, tdir, tconf, nodeClass=testNodeClass, allPluginsPath=allPluginsPath, autoStart=True, do_post_node_creation=post_node_creation) - sdk_pool_refresh(looper, sdk_pool_handle) - yield looper, new_node, sdk_pool_handle, new_steward_wallet_handle + vdr_pool_refresh(looper, vdr_pool_handle) + yield looper, new_node, vdr_pool_handle, new_steward_wallet_handle @pytest.fixture(scope="module") @@ -93,8 +93,8 @@ def setup(testNodeClass, txnPoolNodeSet): ca._query_types.add(READ_CONF) -def test_config_ledger_txns(looper, setup, txnPoolNodeSet, sdk_wallet_client, - sdk_pool_handle): +def test_config_ledger_txns(looper, setup, txnPoolNodeSet, vdr_wallet_client, + vdr_pool_handle): """ Do some writes and reads on the config ledger """ @@ -112,34 +112,34 @@ def test_config_ledger_txns(looper, setup, txnPoolNodeSet, sdk_wallet_client, # Do a write txn key, val = 'test_key', 'test_val' - write(key, val, looper, sdk_pool_handle, sdk_wallet_client) + write(key, val, looper, vdr_pool_handle, vdr_wallet_client) for node in txnPoolNodeSet: assert len(node.getLedger(CONFIG_LEDGER_ID)) == (old_config_ledger_size + 1) state_root_hashes.add(state_roots_serializer.serialize(state.committedHeadHash)) - assert read(key, looper, sdk_pool_handle, sdk_wallet_client) == val + assert read(key, looper, vdr_pool_handle, vdr_wallet_client) == val old_config_ledger_size += 1 key, val = 'test_key', 'test_val1' - write(key, val, looper, sdk_pool_handle, sdk_wallet_client) + write(key, val, looper, vdr_pool_handle, vdr_wallet_client) for node in txnPoolNodeSet: assert len(node.getLedger(CONFIG_LEDGER_ID)) == (old_config_ledger_size + 1) state_root_hashes.add(state_roots_serializer.serialize(state.committedHeadHash)) - assert read(key, looper, sdk_pool_handle, sdk_wallet_client) == val + assert read(key, looper, vdr_pool_handle, 
vdr_wallet_client) == val old_config_ledger_size += 1 key, val = 'test_key1', 'test_val11' - write(key, val, looper, sdk_pool_handle, sdk_wallet_client) + write(key, val, looper, vdr_pool_handle, vdr_wallet_client) for node in txnPoolNodeSet: assert len(node.getLedger(CONFIG_LEDGER_ID)) == (old_config_ledger_size + 1) state_root_hashes.add(state_roots_serializer.serialize(state.committedHeadHash)) - assert read(key, looper, sdk_pool_handle, sdk_wallet_client) == val + assert read(key, looper, vdr_pool_handle, vdr_wallet_client) == val for node in txnPoolNodeSet: # Not all batches might have BLS-sig but at least one of them will have @@ -161,8 +161,8 @@ def keys(): @pytest.fixture(scope="module") def some_config_txns_done(looper, setup, txnPoolNodeSet, keys, - sdk_wallet_client, sdk_pool_handle): - return send_some_config_txns(looper, sdk_pool_handle, sdk_wallet_client, keys) + vdr_wallet_client, vdr_pool_handle): + return send_some_config_txns(looper, vdr_pool_handle, vdr_wallet_client, keys) def start_stopped_node(stopped_node, looper, tconf, @@ -183,20 +183,20 @@ def start_stopped_node(stopped_node, looper, tconf, def test_new_node_catchup_config_ledger(looper, some_config_txns_done, - txnPoolNodeSet, sdk_new_node_caught_up): + txnPoolNodeSet, vdr_new_node_caught_up): """ A new node catches up the config ledger too """ - assert len(sdk_new_node_caught_up.getLedger(CONFIG_LEDGER_ID)) >= \ + assert len(vdr_new_node_caught_up.getLedger(CONFIG_LEDGER_ID)) >= \ len(some_config_txns_done) def test_restarted_node_catches_up_config_ledger_txns(looper, some_config_txns_done, txnPoolNodeSet, - sdk_wallet_client, - sdk_pool_handle, - sdk_new_node_caught_up, + vdr_wallet_client, + vdr_pool_handle, + vdr_new_node_caught_up, keys, tconf, tdir, @@ -205,14 +205,14 @@ def test_restarted_node_catches_up_config_ledger_txns(looper, A node is stopped, a few config ledger txns happen, the stopped node is started and catches up the config ledger """ - new_node = sdk_new_node_caught_up + new_node = vdr_new_node_caught_up disconnect_node_and_ensure_disconnected( looper, txnPoolNodeSet, new_node, stopNode=True) looper.removeProdable(new_node) # Do some config txns; using a fixture as a method, passing some arguments # as None as they only make sense for the fixture (pre-requisites) - send_some_config_txns(looper, sdk_pool_handle, sdk_wallet_client, keys) + send_some_config_txns(looper, vdr_pool_handle, vdr_wallet_client, keys) # Make sure new node got out of sync for node in txnPoolNodeSet[:-1]: diff --git a/plenum/test/node_catchup/test_discard_view_no.py b/plenum/test/node_catchup/test_discard_view_no.py index 43ecdb904a..a235a47943 100644 --- a/plenum/test/node_catchup/test_discard_view_no.py +++ b/plenum/test/node_catchup/test_discard_view_no.py @@ -13,8 +13,8 @@ def testNodeDiscardMessageFromUnknownView(txnPoolNodeSet, sdk_node_set_with_node_added_after_some_txns, - sdk_new_node_caught_up, - allPluginsPath, sdk_wallet_client): + vdr_new_node_caught_up, + allPluginsPath, vdr_wallet_client): """ Node discards 3-phase or ViewChangeDone messages from view nos that it does not know of (view nos before it joined the pool) @@ -40,7 +40,7 @@ def testNodeDiscardMessageFromUnknownView(txnPoolNodeSet, messageTimeout = waits.expectedNodeToNodeMessageDeliveryTime() # 3 pc msg (PrePrepare) needs to be discarded - _, did = sdk_wallet_client + _, did = vdr_wallet_client primaryRepl = getPrimaryReplica(txnPoolNodeSet) inst_id = 0 three_pc = 
create_pre_prepare_no_bls(primaryRepl.node.db_manager.get_state_root_hash(DOMAIN_LEDGER_ID), diff --git a/plenum/test/node_catchup/test_incorrect_catchup_request.py b/plenum/test/node_catchup/test_incorrect_catchup_request.py index 8c9fa125d2..31e50cc9bc 100644 --- a/plenum/test/node_catchup/test_incorrect_catchup_request.py +++ b/plenum/test/node_catchup/test_incorrect_catchup_request.py @@ -2,7 +2,7 @@ from plenum.common.messages.node_messages import CatchupReq from stp_core.common.log import getlogger -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check logger = getlogger() ledger_id = 1 @@ -10,15 +10,15 @@ def test_receive_incorrect_catchup_request_with_end_greater_catchuptill(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): end = 15 catchup_till = 10 req = CatchupReq(ledger_id, 1, end, catchup_till) - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, 4) ledger_manager = txnPoolNodeSet[0].ledgerManager ledger_manager.processCatchupReq(req, "frm") @@ -30,15 +30,15 @@ def test_receive_incorrect_catchup_request_with_end_greater_catchuptill(looper, def test_receive_incorrect_catchup_request_with_start_greater_end(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): start = 10 end = 5 req = CatchupReq(ledger_id, start, end, 11) - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, 4) ledger_manager = txnPoolNodeSet[0].ledgerManager ledger_manager.processCatchupReq(req, "frm") @@ -51,14 +51,14 @@ def test_receive_incorrect_catchup_request_with_start_greater_end(looper, def test_receive_incorrect_catchup_request_with_catchuptill_greater_ledger_size( looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): catchup_till = 100 req = CatchupReq(ledger_id, 1, 10, catchup_till) - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, 4) ledger_manager = txnPoolNodeSet[0].ledgerManager ledger_manager.processCatchupReq(req, "frm") diff --git a/plenum/test/node_catchup/test_large_catchup.py b/plenum/test/node_catchup/test_large_catchup.py index 32fc1fe0f9..9c3680ed00 100644 --- a/plenum/test/node_catchup/test_large_catchup.py +++ b/plenum/test/node_catchup/test_large_catchup.py @@ -4,7 +4,7 @@ from plenum.common.config_helper import PNodeConfigHelper from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality from stp_core.validators.message_length_validator import MessageLenValidator @@ -44,8 +44,8 @@ def test_large_catchup(tdir, tconf, looper, testNodeClass, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, allPluginsPath): """ Checks that node can catchup large ledgers @@ -56,8 +56,8 @@ def test_large_catchup(tdir, tconf, all_nodes = txnPoolNodeSet # Check that requests executed well - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 10) + 
vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 10) # Stop one node waitNodeDataEquality(looper, lagging_node, *rest_nodes) @@ -68,8 +68,8 @@ def test_large_catchup(tdir, tconf, looper.removeProdable(lagging_node) # Send more requests to active nodes - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 100) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 100) waitNodeDataEquality(looper, *rest_nodes) # Make message size limit smaller to ensure that catchup response is diff --git a/plenum/test/node_catchup/test_new_node_catchup.py b/plenum/test/node_catchup/test_new_node_catchup.py index bb98cdd988..a0876b1c60 100644 --- a/plenum/test/node_catchup/test_new_node_catchup.py +++ b/plenum/test/node_catchup/test_new_node_catchup.py @@ -10,7 +10,7 @@ txnCount = 5 -def testNewNodeCatchup(sdk_new_node_caught_up): +def testNewNodeCatchup(vdr_new_node_caught_up): """ A new node that joins after some transactions are done should eventually get those transactions. @@ -22,7 +22,7 @@ def testNewNodeCatchup(sdk_new_node_caught_up): def testPoolLegerCatchupBeforeDomainLedgerCatchup(txnPoolNodeSet, - sdk_new_node_caught_up): + vdr_new_node_caught_up): """ For new node, this should be the sequence of events: 1. Pool ledger starts catching up. @@ -31,7 +31,7 @@ def testPoolLegerCatchupBeforeDomainLedgerCatchup(txnPoolNodeSet, 4. Domain ledger completes catching up Every node's pool ledger starts catching up before it """ - newNode = sdk_new_node_caught_up + newNode = vdr_new_node_caught_up starts = newNode.ledgerManager.spylog.getAll( TestLedgerManager._on_ledger_sync_start.__name__) completes = newNode.ledgerManager.spylog.getAll( diff --git a/plenum/test/node_catchup/test_new_node_catchup2.py b/plenum/test/node_catchup/test_new_node_catchup2.py index 0771487820..861656636c 100644 --- a/plenum/test/node_catchup/test_new_node_catchup2.py +++ b/plenum/test/node_catchup/test_new_node_catchup2.py @@ -2,7 +2,7 @@ from plenum.test import waits from plenum.test.delayers import cqDelay -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.node_request.node_request_helper import chk_commits_prepares_recvd from plenum.test.test_node import ensureElectionsDone @@ -28,7 +28,7 @@ def nodes_slow_to_process_catchup_reqs(txnPoolNodeSet): def testNodeDoesNotParticipateUntilCaughtUp(txnPoolNodeSet, nodes_slow_to_process_catchup_reqs, - sdk_node_created_after_some_txns): + vdr_node_created_after_some_txns): """ A new node that joins after some transactions should stash new transactions until it has caught up @@ -36,10 +36,10 @@ def testNodeDoesNotParticipateUntilCaughtUp(txnPoolNodeSet, """ looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \ - sdk_node_created_after_some_txns + vdr_node_created_after_some_txns txnPoolNodeSet.append(new_node) old_nodes = txnPoolNodeSet[:-1] - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, new_steward_wallet_handle, 4) chk_commits_prepares_recvd(0, old_nodes, new_node) @@ -53,7 +53,7 @@ def testNodeDoesNotParticipateUntilCaughtUp(txnPoolNodeSet, waitNodeDataEquality(looper, new_node, *old_nodes, exclude_from_check=['check_last_ordered_3pc_backup']) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + 
vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, new_steward_wallet_handle, 2) # Commits and Prepares are received by all old nodes diff --git a/plenum/test/node_catchup/test_node_catchup_after_checkpoints.py b/plenum/test/node_catchup/test_node_catchup_after_checkpoints.py index 606a24f59f..ab25b93998 100644 --- a/plenum/test/node_catchup/test_node_catchup_after_checkpoints.py +++ b/plenum/test/node_catchup/test_node_catchup_after_checkpoints.py @@ -21,8 +21,8 @@ def test_node_catchup_after_checkpoints( chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, broken_node_and_others): """ A node misses 3pc messages and checkpoints during some period but later it @@ -35,8 +35,8 @@ def test_node_catchup_after_checkpoints( logger.info("Step 1: The node misses quite a lot of requests") send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, reqs_for_checkpoint + max_batch_size) waitNodeDataInequality(looper, broken_node, *other_nodes) @@ -50,8 +50,8 @@ def test_node_catchup_after_checkpoints( completed_catchups_before = get_number_of_completed_catchups(broken_node) send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * reqs_for_checkpoint - max_batch_size) @@ -69,8 +69,8 @@ def test_node_catchup_after_checkpoints( logger.info("Step 3: Check if the node is able to process requests") send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, reqs_for_checkpoint + max_batch_size) waitNodeDataEquality(looper, repaired_node, *other_nodes) diff --git a/plenum/test/node_catchup/test_node_catchup_after_disconnect.py b/plenum/test/node_catchup/test_node_catchup_after_disconnect.py index c60ac1a429..2d3fda22b3 100644 --- a/plenum/test/node_catchup/test_node_catchup_after_disconnect.py +++ b/plenum/test/node_catchup/test_node_catchup_after_disconnect.py @@ -1,7 +1,7 @@ import pytest from stp_core.common.log import getlogger -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality, \ waitNodeDataInequality, checkNodeDataForEquality from plenum.test.pool_transactions.helper import \ @@ -17,7 +17,7 @@ # TODO: Refactor tests to minimize module-scoped fixtures.They make tests # depend on each other @pytest.mark.skip(reason="INDY-1297. Node does not catch up on reconnection anymore.") -def testNodeCatchupAfterDisconnect(sdk_new_node_caught_up, txnPoolNodeSet, +def testNodeCatchupAfterDisconnect(vdr_new_node_caught_up, txnPoolNodeSet, sdk_node_set_with_node_added_after_some_txns): """ A node that disconnects after some transactions should eventually get the @@ -34,7 +34,7 @@ def testNodeCatchupAfterDisconnect(sdk_new_node_caught_up, txnPoolNodeSet, # TODO: Check if the node has really stopped processing requests? 
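# --- Illustrative aside (not part of the patch) ---------------------------------------------------
# The reconnection tests in this file follow a diverge-then-recover flow. A compact sketch, assuming
# the helpers keep their pre-rename signatures; `reconnect` stands in for whatever restart/reconnect
# step the individual test performs.
from plenum.test.helper import vdr_send_random_and_check
from plenum.test.node_catchup.helper import waitNodeDataEquality, waitNodeDataInequality
from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected

def diverge_then_recover(looper, nodes, pool_handle, wallet, lagging_node, reconnect):
    others = [n for n in nodes if n is not lagging_node]
    # Take one node offline and order a few transactions without it ...
    disconnect_node_and_ensure_disconnected(looper, nodes, lagging_node, stopNode=False)
    vdr_send_random_and_check(looper, nodes, pool_handle, wallet, 5)
    waitNodeDataInequality(looper, lagging_node, *others)
    # ... bring it back and expect catch-up to restore data equality.
    reconnect(lagging_node)
    waitNodeDataEquality(looper, lagging_node, *others)
# --------------------------------------------------------------------------------------------------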
logger.debug("Sending requests") - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, new_steward_wallet_handle, 5) # Make sure new node got out of sync waitNodeDataInequality(looper, new_node, *txnPoolNodeSet[:-1]) @@ -46,6 +46,6 @@ def testNodeCatchupAfterDisconnect(sdk_new_node_caught_up, txnPoolNodeSet, waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1]) logger.debug("Sending more requests") - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, new_steward_wallet_handle, 10) checkNodeDataForEquality(new_node, *txnPoolNodeSet[:-1]) diff --git a/plenum/test/node_catchup/test_node_catchup_after_restart_after_txns.py b/plenum/test/node_catchup/test_node_catchup_after_restart_after_txns.py index 97ebee8bc7..4492ae00f4 100644 --- a/plenum/test/node_catchup/test_node_catchup_after_restart_after_txns.py +++ b/plenum/test/node_catchup/test_node_catchup_after_restart_after_txns.py @@ -7,7 +7,7 @@ from plenum.common.types import HA from plenum.test import waits from plenum.test.delayers import cr_delay -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality, \ check_ledger_state from plenum.test.pool_transactions.helper import \ @@ -36,7 +36,7 @@ def tconf(tconf): # and after prepares, respectively. Here is the pivotal link # https://www.pivotaltracker.com/story/show/127897273 def test_node_catchup_after_restart_with_txns( - sdk_new_node_caught_up, + vdr_new_node_caught_up, txnPoolNodeSet, tdir, tconf, @@ -62,7 +62,7 @@ def test_node_catchup_after_restart_with_txns( # TODO: Check if the node has really stopped processing requests? 
logger.debug("Sending requests") more_requests = 5 - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, new_steward_wallet_handle, more_requests) logger.debug("Starting the stopped node, {}".format(new_node)) nodeHa, nodeCHa = HA(*new_node.nodestack.ha), HA(*new_node.clientstack.ha) diff --git a/plenum/test/node_catchup/test_node_catchup_after_restart_no_txns.py b/plenum/test/node_catchup/test_node_catchup_after_restart_no_txns.py index fdac4ca0c0..30d7310e97 100644 --- a/plenum/test/node_catchup/test_node_catchup_after_restart_no_txns.py +++ b/plenum/test/node_catchup/test_node_catchup_after_restart_no_txns.py @@ -18,7 +18,7 @@ def test_node_catchup_after_restart_no_txns( - sdk_new_node_caught_up, + vdr_new_node_caught_up, txnPoolNodeSet, tdir, tconf, diff --git a/plenum/test/node_catchup/test_node_catchup_causes_no_desync.py b/plenum/test/node_catchup/test_node_catchup_causes_no_desync.py index afbb5c05ef..4fc0c48281 100644 --- a/plenum/test/node_catchup/test_node_catchup_causes_no_desync.py +++ b/plenum/test/node_catchup/test_node_catchup_causes_no_desync.py @@ -7,7 +7,7 @@ from plenum.test.node_catchup.test_node_reject_invalid_txn_during_catchup import \ get_any_non_primary_node from stp_core.common.log import getlogger -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import \ waitNodeDataEquality, \ waitNodeDataInequality @@ -46,8 +46,8 @@ def replicas_synced(node): LOG_SIZE = 3 * CHK_FREQ -def test_node_catchup_causes_no_desync(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, monkeypatch, +def test_node_catchup_causes_no_desync(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, monkeypatch, chkFreqPatched, reqs_for_checkpoint): """ Checks that transactions received by catchup do not @@ -65,16 +65,16 @@ def test_node_catchup_causes_no_desync(looper, txnPoolNodeSet, sdk_pool_handle, lambda *x, **y: None) # Send some requests and check that all replicas except master executed it - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, reqs_for_checkpoint - max_batch_size) waitNodeDataInequality(looper, lagging_node, *rest_nodes) looper.run(eventually(backup_replicas_run_forward, lagging_node)) assert not lagging_node.monitor.isMasterDegraded() - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, reqs_for_checkpoint + max_batch_size) # Check that catchup done waitNodeDataEquality(looper, lagging_node, *rest_nodes) @@ -83,8 +83,8 @@ def test_node_catchup_causes_no_desync(looper, txnPoolNodeSet, sdk_pool_handle, # Send some more requests to ensure that backup and master replicas # are in the same state - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, reqs_for_checkpoint - max_batch_size) looper.run(eventually(replicas_synced, lagging_node)) diff --git a/plenum/test/node_catchup/test_node_catchup_when_3_not_primary_node_restarted.py b/plenum/test/node_catchup/test_node_catchup_when_3_not_primary_node_restarted.py index 83cdab5a0b..ff05b3c13e 100644 --- 
a/plenum/test/node_catchup/test_node_catchup_when_3_not_primary_node_restarted.py +++ b/plenum/test/node_catchup/test_node_catchup_when_3_not_primary_node_restarted.py @@ -1,7 +1,7 @@ from plenum.test.view_change.helper import ensure_all_nodes_have_same_data, \ start_stopped_node from plenum.common.constants import DOMAIN_LEDGER_ID, LedgerState, POOL_LEDGER_ID -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from stp_core.common.log import getlogger from stp_core.loop.eventually import eventually @@ -20,7 +20,7 @@ def catchuped(node): def test_node_catchup_when_3_not_primary_node_restarted( looper, txnPoolNodeSet, tdir, tconf, - allPluginsPath, sdk_wallet_steward, sdk_pool_handle): + allPluginsPath, vdr_wallet_steward, vdr_pool_handle): """ Test case: 1. Create pool of 4 nodes @@ -55,8 +55,8 @@ def start_stop_one_node(node_to_restart, pool_of_nodes): ensure_all_nodes_have_same_data(looper, remaining_nodes, custom_timeout=tconf.NEW_VIEW_TIMEOUT) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 1) node_to_restart = start_stopped_node(node_to_restart, looper, tconf, @@ -83,8 +83,8 @@ def start_stop_one_node(node_to_restart, pool_of_nodes): node_to_restart = [n for n in pool_of_nodes if n.name == nodes_names[__]][0] assert not node_to_restart.has_master_primary pool_of_nodes = start_stop_one_node(node_to_restart, pool_of_nodes) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 1) ensure_all_nodes_have_same_data(looper, pool_of_nodes, custom_timeout=tconf.NEW_VIEW_TIMEOUT) diff --git a/plenum/test/node_catchup/test_node_catchup_with_connection_problem.py b/plenum/test/node_catchup/test_node_catchup_with_connection_problem.py index 3a417b754d..cbb5878d11 100644 --- a/plenum/test/node_catchup/test_node_catchup_with_connection_problem.py +++ b/plenum/test/node_catchup/test_node_catchup_with_connection_problem.py @@ -3,7 +3,7 @@ from plenum.common.messages.node_messages import LedgerStatus, ConsistencyProof from plenum.common.util import getCallableName from plenum.server.router import Route -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected @@ -26,8 +26,8 @@ def lost_count(request): # This test hangs on the 4th iteration. Investigation required. 
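# --- Illustrative aside (not part of the patch) ---------------------------------------------------
# The tests below simulate a flaky connection by losing the first `lost_count` LedgerStatus /
# ConsistencyProof messages and then restoring normal processing (see unpatch_after_call in the
# following hunks). A generic, hypothetical version of that trick:
def drop_first_n_then_delegate(real_handler, lost_count):
    state = {"seen": 0}

    def handler(msg, frm):
        state["seen"] += 1
        if state["seen"] <= lost_count:
            return None                 # silently lose the first few messages
        return real_handler(msg, frm)   # afterwards behave exactly like the original handler

    return handler
# In the actual tests the replacement handler is installed on the node's message routes (cf. the
# nodeMsgRouter.routes patching used elsewhere in this patch) rather than wrapped like this.
# --------------------------------------------------------------------------------------------------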
def test_catchup_with_lost_ledger_status(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tconf, tdir, allPluginsPath, @@ -38,16 +38,16 @@ def test_catchup_with_lost_ledger_status(txnPoolNodeSet, node_to_disconnect = txnPoolNodeSet[-1] - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 5) # restart node disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, node_to_disconnect) looper.removeProdable(name=node_to_disconnect.name) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 2) nodeHa, nodeCHa = HA(*node_to_disconnect.nodestack.ha), HA( @@ -83,8 +83,8 @@ def unpatch_after_call(status, frm): # @pytest.mark.skip(reason="This test hangs on the first iteration. Investigation required; https://github.com/hyperledger/indy-plenum/issues/1546.") def test_catchup_with_lost_first_consistency_proofs(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tconf, tdir, allPluginsPath, @@ -97,16 +97,16 @@ def test_catchup_with_lost_first_consistency_proofs(txnPoolNodeSet, Test makes sure that the node eventually finishes catchup''' node_to_disconnect = txnPoolNodeSet[-1] - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 5) # restart node disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, node_to_disconnect) looper.removeProdable(name=node_to_disconnect.name) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 2) nodeHa, nodeCHa = HA(*node_to_disconnect.nodestack.ha), HA( @@ -141,24 +141,24 @@ def unpatch_after_call(proof, frm): # @pytest.mark.skip(reason="This test hangs on the first iteration. 
Investigation required; https://github.com/hyperledger/indy-plenum/issues/1546.") def test_cancel_request_cp_and_ls_after_catchup(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tconf, tdir, allPluginsPath): '''Test cancel of schedule with requesting ledger statuses and consistency proofs after catchup.''' node_to_disconnect = txnPoolNodeSet[-1] - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 5) # restart node disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, node_to_disconnect) looper.removeProdable(name=node_to_disconnect.name) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 2) # add node_to_disconnect to pool node_to_disconnect = start_stopped_node(node_to_disconnect, looper, tconf, diff --git a/plenum/test/node_catchup/test_node_reject_invalid_txn_during_catchup.py b/plenum/test/node_catchup/test_node_reject_invalid_txn_during_catchup.py index ac02cb177c..70475a4daf 100644 --- a/plenum/test/node_catchup/test_node_reject_invalid_txn_during_catchup.py +++ b/plenum/test/node_catchup/test_node_reject_invalid_txn_during_catchup.py @@ -3,7 +3,7 @@ from plenum.common.txn_util import get_type, set_type from plenum.test.delayers import lsDelay, delay_3pc -from plenum.test.helper import sdk_send_random_and_check, assert_in +from plenum.test.helper import vdr_send_random_and_check, assert_in from plenum.common.ledger import Ledger from plenum.test.stasher import delay_rules, delay_rules_without_processing @@ -27,7 +27,7 @@ def tconf(tconf): tconf.CATCHUP_BATCH_SIZE = old -def test_node_reject_invalid_txn_during_catchup(looper, sdk_pool_handle, sdk_wallet_client, +def test_node_reject_invalid_txn_during_catchup(looper, vdr_pool_handle, vdr_wallet_client, tconf, tdir, txnPoolNodeSet, bad_node, lagging_node): """ @@ -38,7 +38,7 @@ def test_node_reject_invalid_txn_during_catchup(looper, sdk_pool_handle, sdk_wal normal_stashers = [node.nodeIbStasher for node in normal_nodes] with delay_rules_without_processing(lagging_node.nodeIbStasher, delay_3pc()): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 5) # Perform catchup, while making sure that cons proof from bad node is received # before cons proofs from normal nodes, so bad node can participate in catchup diff --git a/plenum/test/node_catchup/test_node_request_consistency_proof.py b/plenum/test/node_catchup/test_node_request_consistency_proof.py index 87ca865b0e..85636a5044 100644 --- a/plenum/test/node_catchup/test_node_request_consistency_proof.py +++ b/plenum/test/node_catchup/test_node_request_consistency_proof.py @@ -10,7 +10,7 @@ from plenum.test.stasher import delay_rules from stp_core.common.log import getlogger from plenum.common.messages.node_messages import LedgerStatus -from plenum.test.helper import sdk_send_random_requests, sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_requests, vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality, ensure_all_nodes_have_same_data # Do not remove the next imports @@ -25,15 +25,15 @@ def test_node_request_consistency_proof(tdir, tconf, looper, txnPoolNodeSet, - 
sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, monkeypatch): lagging_node = txnPoolNodeSet[-1] other_nodes = txnPoolNodeSet[:-1] # Preseed pool with some transactions - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 4) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 4) # Make some node send different ledger statuses so it doesn't get enough similar # consisistency proofs @@ -65,8 +65,8 @@ def build_broken_ledger_status(ledger_id: int, provider: CatchupDataProvider): # Block lagging node from ordering transactions with delay_rules(lagging_node.nodeIbStasher, ppDelay(), pDelay(), cDelay()): # Order some transactions on pool - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 4) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 4) # Start catchup on lagging node lagging_node.ledgerManager.start_catchup() diff --git a/plenum/test/node_catchup/test_node_request_missing_transactions.py b/plenum/test/node_catchup/test_node_request_missing_transactions.py index c90332f439..db1232e2f7 100644 --- a/plenum/test/node_catchup/test_node_request_missing_transactions.py +++ b/plenum/test/node_catchup/test_node_request_missing_transactions.py @@ -4,9 +4,9 @@ from plenum.common.messages.node_messages import CatchupReq from plenum.test import waits -from plenum.test.helper import sdk_send_random_requests, sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_requests, vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality -from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node +from plenum.test.pool_transactions.helper import vdr_add_new_steward_and_node from plenum.test.test_node import checkNodesConnected, getNonPrimaryReplicas from stp_core.common.log import getlogger @@ -31,7 +31,7 @@ def reset(): def testNodeRequestingTxns(reduced_catchup_timeout_conf, txnPoolNodeSet, looper, tdir, tconf, - allPluginsPath, sdk_pool_handle, sdk_wallet_steward, sdk_wallet_client): + allPluginsPath, vdr_pool_handle, vdr_wallet_steward, vdr_wallet_client): """ A newly joined node is catching up and sends catchup requests to other nodes but one of the nodes does not reply and the newly joined node cannot @@ -50,10 +50,10 @@ def ignoreCatchupReq(self, req, frm): badNode.nodeMsgRouter.routes[CatchupReq] = types.MethodType( ignoreCatchupReq, badNode.ledgerManager) more_requests = 10 - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, more_requests) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, more_requests) - _, new_node = sdk_add_new_steward_and_node( - looper, sdk_pool_handle, sdk_wallet_steward, + _, new_node = vdr_add_new_steward_and_node( + looper, vdr_pool_handle, vdr_wallet_steward, 'EpsilonSteward', 'Epsilon', tdir, tconf, allPluginsPath=allPluginsPath) txnPoolNodeSet.append(new_node) @@ -67,7 +67,7 @@ def ignoreCatchupReq(self, req, frm): customTimeout=timeout, exclude_from_check=['check_last_ordered_3pc_backup']) - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 2) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 2) waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1], customTimeout=timeout, exclude_from_check=['check_last_ordered_3pc_backup']) diff --git 
a/plenum/test/node_catchup/test_not_set_H_as_maxsize_for_backup_if_is_primary.py b/plenum/test/node_catchup/test_not_set_H_as_maxsize_for_backup_if_is_primary.py index 403741bf55..e90a81bae0 100644 --- a/plenum/test/node_catchup/test_not_set_H_as_maxsize_for_backup_if_is_primary.py +++ b/plenum/test/node_catchup/test_not_set_H_as_maxsize_for_backup_if_is_primary.py @@ -1,7 +1,7 @@ import pytest from plenum.test.delayers import delay_3pc -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected from plenum.test.stasher import delay_rules from plenum.test.test_node import ensureElectionsDone @@ -31,8 +31,8 @@ def tconf(tconf): def test_set_H_as_maxsize_for_backup_if_is_primary(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tconf, tdir, allPluginsPath): @@ -55,10 +55,10 @@ def test_set_H_as_maxsize_for_backup_if_is_primary(looper, ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=tconf.NEW_VIEW_TIMEOUT) - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, LOG_SIZE) # Check restored state diff --git a/plenum/test/node_catchup/test_post_genesis_txn_from_catchup_added_to_ledger.py b/plenum/test/node_catchup/test_post_genesis_txn_from_catchup_added_to_ledger.py index 127d2e214a..6cd1f2b78c 100644 --- a/plenum/test/node_catchup/test_post_genesis_txn_from_catchup_added_to_ledger.py +++ b/plenum/test/node_catchup/test_post_genesis_txn_from_catchup_added_to_ledger.py @@ -2,7 +2,7 @@ from plenum.common.constants import DOMAIN_LEDGER_ID, CURRENT_PROTOCOL_VERSION from plenum.common.txn_util import reqToTxn -from plenum.test.helper import sdk_random_request_objects +from plenum.test.helper import vdr_random_request_objects def test_post_genesis_txn_from_catchup_added_to_ledger(looper, txnPoolNodeSet): @@ -10,7 +10,7 @@ def test_post_genesis_txn_from_catchup_added_to_ledger(looper, txnPoolNodeSet): def add_txn_to_ledger(txn_time: Optional[int]) -> dict: nonlocal node - req = sdk_random_request_objects(1, CURRENT_PROTOCOL_VERSION, identifier='someidentifier')[0] + req = vdr_random_request_objects(1, CURRENT_PROTOCOL_VERSION, identifier='someidentifier')[0] txn = reqToTxn(req) node.domainLedger.append_txns_metadata([txn], txn_time=txn_time) node.domainLedger.appendTxns([txn]) diff --git a/plenum/test/node_catchup/test_process_catchup_replies.py b/plenum/test/node_catchup/test_process_catchup_replies.py index e53a07a93d..719c8983f7 100644 --- a/plenum/test/node_catchup/test_process_catchup_replies.py +++ b/plenum/test/node_catchup/test_process_catchup_replies.py @@ -7,7 +7,7 @@ from plenum.common.types import f from plenum.common.util import SortedDict from plenum.server.catchup.utils import CatchupTill -from plenum.test.helper import sdk_signed_random_requests +from plenum.test.helper import vdr_signed_random_requests ledger_id = DOMAIN_LEDGER_ID @@ -22,7 +22,7 @@ def _add_txns_to_ledger(node, looper, sdk_wallet_client, num_txns_in_reply, repl ledger_manager = node.ledgerManager ledger = ledger_manager.ledgerRegistry[ledger_id].ledger catchup_rep_service = ledger_manager._node_leecher._leechers[ledger_id]._catchup_rep_service - reqs = sdk_signed_random_requests(looper, sdk_wallet_client, txn_count) + reqs = vdr_signed_random_requests(looper, sdk_wallet_client, txn_count) # add 
transactions to ledger for req in reqs: txn = append_txn_metadata(reqToTxn(req), txn_time=12345678) @@ -67,7 +67,7 @@ def check_replies_applied(old_ledger_size, ledger, catchup_rep_service, frm, rep return ledger.size -def test_process_catchup_replies(txnPoolNodeSet, looper, sdk_wallet_client): +def test_process_catchup_replies(txnPoolNodeSet, looper, vdr_wallet_client): ''' Test correct work of method processCatchupRep and that sending replies in reverse order will call a few iterations of cycle in _processCatchupRep @@ -81,7 +81,7 @@ def test_process_catchup_replies(txnPoolNodeSet, looper, sdk_wallet_client): catchup_till, catchup_reps = _add_txns_to_ledger(txnPoolNodeSet[1], looper, - sdk_wallet_client, + vdr_wallet_client, num_txns_in_reply, reply_count) catchup_rep_service._catchup_till = catchup_till @@ -90,43 +90,43 @@ def test_process_catchup_replies(txnPoolNodeSet, looper, sdk_wallet_client): # send replies in next order: 2, 3, 1 # and check that after sending reply 1, replies 2 and 3 will be applied. reply2 = catchup_reps[1] - ledger_manager.processCatchupRep(reply2, sdk_wallet_client[1]) - check_reply_not_applied(ledger_size, ledger, catchup_rep_service, sdk_wallet_client[1], reply2) + ledger_manager.processCatchupRep(reply2, vdr_wallet_client[1]) + check_reply_not_applied(ledger_size, ledger, catchup_rep_service, vdr_wallet_client[1], reply2) reply3 = catchup_reps[2] - ledger_manager.processCatchupRep(reply3, sdk_wallet_client[1]) - check_reply_not_applied(ledger_size, ledger, catchup_rep_service, sdk_wallet_client[1], reply2) - check_reply_not_applied(ledger_size, ledger, catchup_rep_service, sdk_wallet_client[1], reply3) + ledger_manager.processCatchupRep(reply3, vdr_wallet_client[1]) + check_reply_not_applied(ledger_size, ledger, catchup_rep_service, vdr_wallet_client[1], reply2) + check_reply_not_applied(ledger_size, ledger, catchup_rep_service, vdr_wallet_client[1], reply3) reply1 = catchup_reps[0] - ledger_manager.processCatchupRep(reply1, sdk_wallet_client[1]) + ledger_manager.processCatchupRep(reply1, vdr_wallet_client[1]) ledger_size = check_replies_applied(ledger_size, ledger, catchup_rep_service, - sdk_wallet_client[1], + vdr_wallet_client[1], [reply1, reply2, reply3]) # send replies in next order: 6, 4, 5 # and check that after sending reply 4, it will be applied. # Check that after sending reply 5, replies 5 and 6 will be applied. 
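# --- Illustrative aside (not part of the patch) ---------------------------------------------------
# What the assertions around here verify, as a tiny self-contained model (this is *not* the
# CatchupRepService implementation): a catchup reply is applied only once it is contiguous with the
# ledger; out-of-order replies stay stashed until the missing one arrives, after which every
# contiguous stashed reply is applied in one pass.
def apply_contiguous(ledger, stashed):
    """ledger: list of txns already written; stashed: {start_seq_no: [txns]} keyed by 1-based start."""
    applied = []
    while len(ledger) + 1 in stashed:       # the next reply starts right after the ledger end
        txns = stashed.pop(len(ledger) + 1)
        ledger.extend(txns)                 # apply it ...
        applied.append(txns)                # ... which may unlock later stashed replies
    return applied                          # anything left in `stashed` still has a gap before it

# Example: with three txns on the ledger, replies starting at seq_no 7 and 10 apply nothing; once
# the reply starting at 4 arrives, all three are applied in a single call -- the same behaviour that
# check_reply_not_applied / check_replies_applied assert in this test.
# --------------------------------------------------------------------------------------------------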
reply6 = catchup_reps[5] - ledger_manager.processCatchupRep(reply6, sdk_wallet_client[1]) - check_reply_not_applied(ledger_size, ledger, catchup_rep_service, sdk_wallet_client[1], reply6) + ledger_manager.processCatchupRep(reply6, vdr_wallet_client[1]) + check_reply_not_applied(ledger_size, ledger, catchup_rep_service, vdr_wallet_client[1], reply6) reply4 = catchup_reps[3] - ledger_manager.processCatchupRep(reply4, sdk_wallet_client[1]) - ledger_size = check_replies_applied(ledger_size, ledger, catchup_rep_service, sdk_wallet_client[1], [reply4]) - check_reply_not_applied(ledger_size, ledger, catchup_rep_service, sdk_wallet_client[1], reply6) + ledger_manager.processCatchupRep(reply4, vdr_wallet_client[1]) + ledger_size = check_replies_applied(ledger_size, ledger, catchup_rep_service, vdr_wallet_client[1], [reply4]) + check_reply_not_applied(ledger_size, ledger, catchup_rep_service, vdr_wallet_client[1], reply6) reply5 = catchup_reps[4] - ledger_manager.processCatchupRep(reply5, sdk_wallet_client[1]) - ledger_size = check_replies_applied(ledger_size, ledger, catchup_rep_service, sdk_wallet_client[1], [reply5, + ledger_manager.processCatchupRep(reply5, vdr_wallet_client[1]) + ledger_size = check_replies_applied(ledger_size, ledger, catchup_rep_service, vdr_wallet_client[1], [reply5, reply6]) assert not catchup_rep_service._received_catchup_replies_from assert not catchup_rep_service._received_catchup_txns -def test_process_invalid_catchup_reply(txnPoolNodeSet, looper, sdk_wallet_client): +def test_process_invalid_catchup_reply(txnPoolNodeSet, looper, vdr_wallet_client): ''' Test correct work of method processCatchupRep and that sending replies in reverse order will call a few iterations of cycle in _processCatchupRep @@ -140,7 +140,7 @@ def test_process_invalid_catchup_reply(txnPoolNodeSet, looper, sdk_wallet_client catchup_till, catchup_reps = _add_txns_to_ledger(txnPoolNodeSet[1], looper, - sdk_wallet_client, + vdr_wallet_client, num_txns_in_reply, reply_count) catchup_rep_service._catchup_till = catchup_till @@ -149,38 +149,38 @@ def test_process_invalid_catchup_reply(txnPoolNodeSet, looper, sdk_wallet_client # make invalid catchup reply by dint of adding new transaction in it reply2 = catchup_reps[1] txns = OrderedDict(getattr(reply2, f.TXNS.nm)) - req = sdk_signed_random_requests(looper, sdk_wallet_client, 1)[0] + req = vdr_signed_random_requests(looper, vdr_wallet_client, 1)[0] txns[str(ledger_size + 4)] = append_txn_metadata(reqToTxn(req), txn_time=12345678) invalid_reply2 = CatchupRep(ledger_id, txns, getattr(reply2, f.CONS_PROOF.nm)) # process 2nd interval with invalid catchup reply ledger_manager.processCatchupRep(invalid_reply2, - sdk_wallet_client[1]) + vdr_wallet_client[1]) # check that invalid transaction was not added to ledger, but add to ledger_info.receivedCatchUpReplies - check_reply_not_applied(ledger_size, ledger, catchup_rep_service, sdk_wallet_client[1], invalid_reply2) + check_reply_not_applied(ledger_size, ledger, catchup_rep_service, vdr_wallet_client[1], invalid_reply2) # process valid reply from 1st interval reply1 = catchup_reps[0] - ledger_manager.processCatchupRep(reply1, sdk_wallet_client[1]) + ledger_manager.processCatchupRep(reply1, vdr_wallet_client[1]) # check that only valid reply added to ledger ledger_size = check_replies_applied(ledger_size, ledger, catchup_rep_service, - sdk_wallet_client[1], + vdr_wallet_client[1], [reply1]) # check that invalid reply was removed from ledger_info.receivedCatchUpReplies received_replies = {str(seq_no) for seq_no, _ 
in catchup_rep_service._received_catchup_txns} assert not set(reply2.txns.keys()).issubset(received_replies) - assert not catchup_rep_service._received_catchup_replies_from[sdk_wallet_client[1]] + assert not catchup_rep_service._received_catchup_replies_from[vdr_wallet_client[1]] # check that valid reply for 2nd interval was added to ledger reply2 = catchup_reps[1] - ledger_manager.processCatchupRep(reply2, sdk_wallet_client[1]) + ledger_manager.processCatchupRep(reply2, vdr_wallet_client[1]) ledger_size = check_replies_applied(ledger_size, ledger, catchup_rep_service, - sdk_wallet_client[1], + vdr_wallet_client[1], [reply2]) assert not catchup_rep_service._received_catchup_replies_from assert not catchup_rep_service._received_catchup_txns diff --git a/plenum/test/node_catchup/test_remove_request_keys_post_catchup.py b/plenum/test/node_catchup/test_remove_request_keys_post_catchup.py index b80175423f..ed75fa37c3 100644 --- a/plenum/test/node_catchup/test_remove_request_keys_post_catchup.py +++ b/plenum/test/node_catchup/test_remove_request_keys_post_catchup.py @@ -5,7 +5,7 @@ from plenum.test.delayers import delay_3pc_messages, pDelay, cDelay, ppDelay, \ cr_delay from plenum.test.helper import send_reqs_batches_and_get_suff_replies, \ - check_last_ordered_3pc, sdk_json_couples_to_request_list, assertExp + check_last_ordered_3pc, vdr_json_couples_to_request_list, assertExp from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.test_node import getNonPrimaryReplicas, ensureElectionsDone from plenum.test.view_change.helper import ensure_view_change @@ -27,19 +27,19 @@ def setup(request, looper, txnPoolNodeSet): def test_nodes_removes_request_keys_for_ordered(setup, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): """ A node does not order requests since it is missing some 3PC messages, gets them from catchup. It then clears them from its request queues """ slow_node, fast_nodes = setup - reqs = sdk_json_couples_to_request_list( + reqs = vdr_json_couples_to_request_list( send_reqs_batches_and_get_suff_replies( looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, 10, 5)) ensure_all_nodes_have_same_data(looper, fast_nodes) diff --git a/plenum/test/node_catchup/test_revert_during_catchup.py b/plenum/test/node_catchup/test_revert_during_catchup.py index dbbcc165aa..c490e9b360 100644 --- a/plenum/test/node_catchup/test_revert_during_catchup.py +++ b/plenum/test/node_catchup/test_revert_during_catchup.py @@ -7,7 +7,7 @@ from plenum.test import waits from plenum.test.delayers import cDelay, cr_delay, lsDelay from plenum.test.helper import check_last_ordered_3pc, \ - assertEquality, sdk_send_random_and_check + assertEquality, vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataInequality, \ ensure_all_nodes_have_same_data, make_a_node_catchup_less, \ repair_node_catchup_less @@ -37,8 +37,8 @@ def tconf(tconf): @pytest.mark.skip(reason="We don't make a catchup during new view_change") def test_slow_node_reverts_unordered_state_during_catchup(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): """ Delay COMMITs to a node such that when it needs to catchup, it needs to revert some unordered state. 
Also till this time the node should have @@ -49,8 +49,8 @@ def test_slow_node_reverts_unordered_state_during_catchup(looper, try to process delayed COMMITs, some COMMITs will be rejected but some will be processed since catchup was done for older ledger. """ - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 3 * Max3PCBatchSize) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 3 * Max3PCBatchSize) nprs = getNonPrimaryReplicas(txnPoolNodeSet, 0) slow_node = nprs[-1].node other_nodes = [n for n in txnPoolNodeSet if n != slow_node] @@ -73,8 +73,8 @@ def test_slow_node_reverts_unordered_state_during_catchup(looper, make_a_node_catchup_less(slow_node, other_nodes, DOMAIN_LEDGER_ID, delay_batches * Max3PCBatchSize) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 6 * Max3PCBatchSize) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 6 * Max3PCBatchSize) ensure_all_nodes_have_same_data(looper, other_nodes) waitNodeDataInequality(looper, slow_node, *other_nodes) @@ -142,6 +142,6 @@ def chk5(): # make sure that the pool is functional checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1) ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 2 * Max3PCBatchSize) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 2 * Max3PCBatchSize) ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) diff --git a/plenum/test/node_catchup/test_set_H_to_maxsize_and_not_stash_on_backup.py b/plenum/test/node_catchup/test_set_H_to_maxsize_and_not_stash_on_backup.py index efe2532cf9..8b50fd413f 100644 --- a/plenum/test/node_catchup/test_set_H_to_maxsize_and_not_stash_on_backup.py +++ b/plenum/test/node_catchup/test_set_H_to_maxsize_and_not_stash_on_backup.py @@ -3,7 +3,7 @@ from plenum.server.replica_validator_enums import STASH_WATERMARKS from plenum.test.delayers import nv_delay -from plenum.test.helper import sdk_send_random_and_check, waitForViewChange +from plenum.test.helper import vdr_send_random_and_check, waitForViewChange from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules from plenum.test.test_node import ensureElectionsDone @@ -31,8 +31,8 @@ def tconf(tconf): def test_set_H_greater_then_last_ppseqno(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath): @@ -40,7 +40,7 @@ def test_set_H_greater_then_last_ppseqno(looper, # send LOG_SIZE requests and check, that all watermarks on all replicas is not changed # and now is (0, LOG_SIZE) """Send random requests for moving watermarks""" - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, LOG_SIZE) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, LOG_SIZE) # check, that all of node set up watermark greater, then default and # ppSeqNo with number LOG_SIZE + 1 will be out from default watermark assert txnPoolNodeSet[0].replicas[1].last_ordered_3pc[1] == LOG_SIZE @@ -49,8 +49,8 @@ def test_set_H_greater_then_last_ppseqno(looper, assert r.h >= LOG_SIZE assert r.H >= LOG_SIZE + LOG_SIZE """Adding new node, for scheduling propagate primary procedure""" - new_node = add_new_node(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 
tdir, tconf, allPluginsPath) + new_node = add_new_node(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath) waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=start_view_no + 1) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, exclude_from_check=['check_last_ordered_3pc_backup']) @@ -64,7 +64,7 @@ def test_set_H_greater_then_last_ppseqno(looper, assert r.h == 0 assert r.H == sys.maxsize """Send requests and check. that backup replicas does not stashing it by outside watermarks reason""" - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, 1) # check, that there is no any stashed "outside watermark" messages. for r in new_node.replicas.values(): assert r.stasher.stash_size(STASH_WATERMARKS) == 0 diff --git a/plenum/test/node_catchup/test_ts_store_after_catchup.py b/plenum/test/node_catchup/test_ts_store_after_catchup.py index 6d1141aa6d..6e5505ab0a 100644 --- a/plenum/test/node_catchup/test_ts_store_after_catchup.py +++ b/plenum/test/node_catchup/test_ts_store_after_catchup.py @@ -5,7 +5,7 @@ from plenum.server.request_handlers.static_taa_helper import StaticTAAHelper from plenum.test.buy_handler import BuyHandler from plenum.test.constants import GET_BUY -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected from plenum.test.test_node import checkNodesConnected @@ -16,22 +16,22 @@ def test_fill_ts_store_after_catchup(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tconf, tdir, allPluginsPath ): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 5) node_to_disconnect = txnPoolNodeSet[-1] disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, node_to_disconnect) looper.removeProdable(name=node_to_disconnect.name) - sdk_replies = sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 2) + sdk_replies = vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 2) node_to_disconnect = start_stopped_node(node_to_disconnect, looper, tconf, tdir, allPluginsPath) @@ -59,13 +59,13 @@ def create_random_taa(): def test_fill_ts_store_for_config_after_catchup(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_trustee, + vdr_pool_handle, + vdr_wallet_trustee, tconf, tdir, allPluginsPath, set_txn_author_agreement_aml): - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, *create_random_taa(), + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, *create_random_taa(), ratified=get_utc_epoch() - 600) node_to_disconnect = txnPoolNodeSet[-1] @@ -73,7 +73,7 @@ def test_fill_ts_store_for_config_after_catchup(txnPoolNodeSet, txnPoolNodeSet, node_to_disconnect) looper.removeProdable(name=node_to_disconnect.name) - sdk_reply = sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, *create_random_taa(), + sdk_reply = sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, *create_random_taa(), ratified=get_utc_epoch() - 600) node_to_disconnect = 
start_stopped_node(node_to_disconnect, looper, tconf, diff --git a/plenum/test/node_catchup_with_3pc/helper.py b/plenum/test/node_catchup_with_3pc/helper.py index 5a49efc3b9..de49e51d0d 100644 --- a/plenum/test/node_catchup_with_3pc/helper.py +++ b/plenum/test/node_catchup_with_3pc/helper.py @@ -5,7 +5,7 @@ from plenum.common.util import compare_3PC_keys from plenum.server.catchup.node_leecher_service import NodeLeecherService from plenum.test.delayers import delay_3pc, cr_delay -from plenum.test.helper import sdk_send_random_pool_requests, sdk_get_and_check_replies +from plenum.test.helper import vdr_send_random_pool_requests, vdr_get_and_check_replies from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules from stp_core.loop.eventually import eventually @@ -49,8 +49,8 @@ def check_nodes_ordered_till(nodes: Iterable, view_no: int, pp_seq_no: int): # Order pool requests while delaying first two commits on lagging node with delay_rules(lagging_stasher, delay_3pc(before=init_pp_seq_no + 3, msgs=Commit)): # Send some pool requests - reqs = sdk_send_random_pool_requests(looper, sdk_pool_handle, sdk_wallet_new_steward, 4) - sdk_get_and_check_replies(looper, reqs) + reqs = vdr_send_random_pool_requests(looper, sdk_pool_handle, sdk_wallet_new_steward, 4) + vdr_get_and_check_replies(looper, reqs) # Make sure pool is in expected state for node in other_nodes: diff --git a/plenum/test/node_catchup_with_3pc/test_catchup_with_skipped_commits.py b/plenum/test/node_catchup_with_3pc/test_catchup_with_skipped_commits.py index 3f4ee0da40..eac677fd82 100644 --- a/plenum/test/node_catchup_with_3pc/test_catchup_with_skipped_commits.py +++ b/plenum/test/node_catchup_with_3pc/test_catchup_with_skipped_commits.py @@ -8,7 +8,7 @@ from plenum.common.util import compare_3PC_keys from plenum.server.catchup.node_leecher_service import NodeLeecherService from plenum.test.delayers import cr_delay, delay_3pc -from plenum.test.helper import sdk_send_random_and_check, sdk_send_random_requests, sdk_get_and_check_replies, \ +from plenum.test.helper import vdr_send_random_and_check, vdr_send_random_requests, vdr_get_and_check_replies, \ max_3pc_batch_limits from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules, start_delaying, stop_delaying_and_process @@ -26,8 +26,8 @@ def tconf(tconf): def test_catchup_with_skipped_commits(tdir, tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): lagging_node = txnPoolNodeSet[-1] lagging_stasher = lagging_node.nodeIbStasher other_nodes = txnPoolNodeSet[:-1] @@ -47,7 +47,7 @@ def check_nodes_ordered_till(nodes: Iterable, view_no: int, pp_seq_no: int): assert compare_3PC_keys((view_no, pp_seq_no), node.master_replica.last_ordered_3pc) >= 0 # Preload nodes with some transactions - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) for node in txnPoolNodeSet: assert node.master_replica.last_ordered_3pc == (0, 1) @@ -57,7 +57,7 @@ def check_nodes_ordered_till(nodes: Iterable, view_no: int, pp_seq_no: int): start_delaying(lagging_stasher, delay_3pc(before=4, msgs=Commit)) # Send more requests - reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 6) + reqs = vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 6) # Wait until pool ordered till 
(0, 3) looper.run(eventually(check_nodes_ordered_till, other_nodes, 0, 3)) @@ -82,7 +82,7 @@ def check_nodes_ordered_till(nodes: Iterable, view_no: int, pp_seq_no: int): looper.run(eventually(check_lagging_node_done_catchup)) # Ensure that all requests were ordered - sdk_get_and_check_replies(looper, reqs) + vdr_get_and_check_replies(looper, reqs) # Ensure that all nodes will eventually have same data ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/node_catchup_with_3pc/test_catchup_with_skipped_commits_received_before_catchup_audit.py b/plenum/test/node_catchup_with_3pc/test_catchup_with_skipped_commits_received_before_catchup_audit.py index d62e1fa4da..11dc2bc365 100644 --- a/plenum/test/node_catchup_with_3pc/test_catchup_with_skipped_commits_received_before_catchup_audit.py +++ b/plenum/test/node_catchup_with_3pc/test_catchup_with_skipped_commits_received_before_catchup_audit.py @@ -16,10 +16,10 @@ def tconf(tconf): def test_catchup_with_skipped_commits_received_before_catchup_audit(tdir, tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_new_steward): + vdr_pool_handle, + vdr_wallet_new_steward): check_catchup_with_skipped_commits_received_before_catchup(NodeLeecherService.State.SyncingAudit, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_new_steward) + vdr_pool_handle, + vdr_wallet_new_steward) diff --git a/plenum/test/node_catchup_with_3pc/test_catchup_with_skipped_commits_received_before_catchup_pool.py b/plenum/test/node_catchup_with_3pc/test_catchup_with_skipped_commits_received_before_catchup_pool.py index f5a123d1a2..12289b07ae 100644 --- a/plenum/test/node_catchup_with_3pc/test_catchup_with_skipped_commits_received_before_catchup_pool.py +++ b/plenum/test/node_catchup_with_3pc/test_catchup_with_skipped_commits_received_before_catchup_pool.py @@ -16,10 +16,10 @@ def tconf(tconf): def test_catchup_with_skipped_commits_received_before_catchup_pool(tdir, tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_new_steward): + vdr_pool_handle, + vdr_wallet_new_steward): check_catchup_with_skipped_commits_received_before_catchup(NodeLeecherService.State.SyncingPool, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_new_steward) + vdr_pool_handle, + vdr_wallet_new_steward) diff --git a/plenum/test/node_catchup_with_3pc/test_limited_stashing_3pc_while_catchup.py b/plenum/test/node_catchup_with_3pc/test_limited_stashing_3pc_while_catchup.py index 57c90c0b33..92d4177966 100644 --- a/plenum/test/node_catchup_with_3pc/test_limited_stashing_3pc_while_catchup.py +++ b/plenum/test/node_catchup_with_3pc/test_limited_stashing_3pc_while_catchup.py @@ -9,8 +9,8 @@ from plenum.test.delayers import cr_delay, msg_rep_delay from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected -from plenum.test.helper import sdk_send_random_and_check, assertExp, max_3pc_batch_limits, \ - sdk_send_batches_of_random_and_check, check_last_ordered_3pc_on_master +from plenum.test.helper import vdr_send_random_and_check, assertExp, max_3pc_batch_limits, \ + vdr_send_batches_of_random_and_check, check_last_ordered_3pc_on_master from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.stasher import delay_rules from plenum.test.test_node import checkNodesConnected @@ -37,8 +37,8 @@ def test_limited_stash_3pc_while_catchup(tdir, tconf, looper, testNodeClass, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, allPluginsPath, chkFreqPatched): ''' @@ -51,8 
+51,8 @@ def test_limited_stash_3pc_while_catchup(tdir, tconf, rest_nodes = txnPoolNodeSet[:-1] # Check that requests executed well - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) # Stop one node waitNodeDataEquality(looper, lagging_node, *rest_nodes) @@ -63,8 +63,8 @@ def test_limited_stash_3pc_while_catchup(tdir, tconf, looper.removeProdable(lagging_node) # Order 2 checkpoints on rest_nodes (2 txns in 2 batches) - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 2 * CHK_FREQ, 2) waitNodeDataEquality(looper, *rest_nodes) @@ -92,19 +92,19 @@ def test_limited_stash_3pc_while_catchup(tdir, tconf, timeout=60)) # Order 2 checkpoints in the first lagging node catchup (2 txns in 2 batches) - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 2 * CHK_FREQ, 2) # Order 2 checkpoints in the second lagging node catchup (2 txns in 2 batches) - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 2 * CHK_FREQ, 2) waitNodeDataEquality(looper, *txnPoolNodeSet, customTimeout=5, exclude_from_check=['check_last_ordered_3pc_backup']) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) looper.run( eventually( lambda: assertExp( diff --git a/plenum/test/node_catchup_with_3pc/test_preprepares_and_prepares_recovery_after_catchup.py b/plenum/test/node_catchup_with_3pc/test_preprepares_and_prepares_recovery_after_catchup.py index 746d4aa508..11c1ec2467 100644 --- a/plenum/test/node_catchup_with_3pc/test_preprepares_and_prepares_recovery_after_catchup.py +++ b/plenum/test/node_catchup_with_3pc/test_preprepares_and_prepares_recovery_after_catchup.py @@ -4,7 +4,7 @@ from plenum.test import waits from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected -from plenum.test.helper import max_3pc_batch_limits, sdk_send_random_and_check +from plenum.test.helper import max_3pc_batch_limits, vdr_send_random_and_check from plenum.test.test_node import checkNodesConnected from plenum.test.view_change.helper import start_stopped_node from plenum.test.view_change_with_delays.helper import check_last_ordered @@ -47,8 +47,8 @@ def test_preprepares_and_prepares_recovery_after_catchup(tdir, tconf, looper, testNodeClass, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, allPluginsPath, chkFreqPatched): """ @@ -57,7 +57,7 @@ def test_preprepares_and_prepares_recovery_after_catchup(tdir, tconf, node_to_restart = txnPoolNodeSet[-1] - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, NUM_OF_REQ) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, NUM_OF_REQ) # Check that all of the nodes except the slows one ordered the request looper.run(eventually(check_last_ordered, txnPoolNodeSet, (0, NUM_OF_REQ))) diff --git a/plenum/test/node_catchup_with_3pc/test_slow_catchup_while_ordering.py 
b/plenum/test/node_catchup_with_3pc/test_slow_catchup_while_ordering.py index 34cfbe413c..c547ddc6ac 100644 --- a/plenum/test/node_catchup_with_3pc/test_slow_catchup_while_ordering.py +++ b/plenum/test/node_catchup_with_3pc/test_slow_catchup_while_ordering.py @@ -6,7 +6,7 @@ from plenum.common.messages.node_messages import MessageReq, CatchupReq from plenum.server.catchup.node_leecher_service import NodeLeecherService from plenum.test.delayers import ppDelay, pDelay, cDelay, DEFAULT_DELAY -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules from stp_core.loop.eventually import eventually @@ -30,8 +30,8 @@ def delay(msg): def test_slow_catchup_while_ordering(tdir, tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): lagging_node = txnPoolNodeSet[-1] other_lagging_node = txnPoolNodeSet[-2] other_nodes = txnPoolNodeSet[:-1] @@ -46,8 +46,8 @@ def check_lagging_node_is_not_syncing_audit(): # Prevent lagging node from ordering with delay_rules(lagging_node.nodeIbStasher, ppDelay(), pDelay(), cDelay()): # Order request on all nodes except lagging one - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) # Prevent lagging node from catching up domain ledger (and finishing catchup) with delay_rules(other_stashers, delay_domain_ledger_catchup()): @@ -60,8 +60,8 @@ def check_lagging_node_is_not_syncing_audit(): assert lagging_node_state() != NodeLeecherService.State.Idle # Order one more request on all nodes except lagging one - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) # Now lagging node can catch up domain ledger which contains more transactions # than it was when audit ledger was caught up @@ -73,8 +73,8 @@ def check_lagging_node_is_not_syncing_audit(): # Ensure that even if we disable some other node pool is still functional # (it won't be the case if old lagging node is nonfunctional) with delay_rules(other_lagging_node.nodeIbStasher, ppDelay(), pDelay(), cDelay()): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) # Ensure that all nodes will eventually have same data ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/node_catchup_with_3pc/test_stashing_3pc_while_catchup.py b/plenum/test/node_catchup_with_3pc/test_stashing_3pc_while_catchup.py index e40bb71443..7d68d2ff6f 100644 --- a/plenum/test/node_catchup_with_3pc/test_stashing_3pc_while_catchup.py +++ b/plenum/test/node_catchup_with_3pc/test_stashing_3pc_while_catchup.py @@ -9,7 +9,7 @@ from plenum.test.delayers import cr_delay from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected -from plenum.test.helper import sdk_send_random_and_check, assertExp +from plenum.test.helper import vdr_send_random_and_check, assertExp from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.stasher import delay_rules from plenum.test.test_node import checkNodesConnected @@ -23,8 +23,8 @@ def test_3pc_while_catchup(tdir, tconf, 
looper, testNodeClass, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, allPluginsPath): ''' Tests that requests being ordered during catch-up are stashed and re-applied @@ -36,8 +36,8 @@ def test_3pc_while_catchup(tdir, tconf, rest_nodes = txnPoolNodeSet[:-1] # Check that requests executed well - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 10) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 10) # Stop one node waitNodeDataEquality(looper, lagging_node, *rest_nodes) @@ -48,8 +48,8 @@ def test_3pc_while_catchup(tdir, tconf, looper.removeProdable(lagging_node) # Send more requests to active nodes - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 10) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 10) waitNodeDataEquality(looper, *rest_nodes) # Restart stopped node and wait for successful catch up @@ -76,8 +76,8 @@ def test_3pc_while_catchup(tdir, tconf, timeout=60)) # make sure that more requests are being ordered while catch-up is in progress - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 10) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 10) assert lagging_node.mode == Mode.syncing assert all(replica.stasher.stash_size(STASH_CATCH_UP) > 0 for inst_id, replica in lagging_node.replicas.items()) diff --git a/plenum/test/node_catchup_with_3pc/test_stashing_3pc_while_catchup_checkpoints.py b/plenum/test/node_catchup_with_3pc/test_stashing_3pc_while_catchup_checkpoints.py index f3fcc0682e..0037f287fd 100644 --- a/plenum/test/node_catchup_with_3pc/test_stashing_3pc_while_catchup_checkpoints.py +++ b/plenum/test/node_catchup_with_3pc/test_stashing_3pc_while_catchup_checkpoints.py @@ -13,7 +13,7 @@ from plenum.test.delayers import cr_delay from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected -from plenum.test.helper import sdk_send_random_and_check, assertExp, max_3pc_batch_limits, \ +from plenum.test.helper import vdr_send_random_and_check, assertExp, max_3pc_batch_limits, \ check_last_ordered_3pc_on_all_replicas, get_pp_seq_no from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.stasher import delay_rules @@ -40,8 +40,8 @@ def test_3pc_while_catchup_with_chkpoints(tdir, tconf, reqs_for_checkpoint, testNodeClass, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, allPluginsPath): ''' Tests that 3PC messages and Checkpoints being ordered during catch-up are stashed and re-applied @@ -56,8 +56,8 @@ def test_3pc_while_catchup_with_chkpoints(tdir, tconf, rest_nodes = txnPoolNodeSet[:-1] # Check that requests executed well - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) batches_count += 1 # Stop one node waitNodeDataEquality(looper, lagging_node, *rest_nodes) @@ -68,8 +68,8 @@ def test_3pc_while_catchup_with_chkpoints(tdir, tconf, looper.removeProdable(lagging_node) # Send more requests to active nodes - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) batches_count += 1 waitNodeDataEquality(looper, *rest_nodes) @@ -101,8 +101,8 
@@ def test_3pc_while_catchup_with_chkpoints(tdir, tconf, # stash enough stable checkpoints for starting a catch-up num_checkpoints = Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 num_reqs = reqs_for_checkpoint * num_checkpoints + 1 - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, num_reqs) batches_count += num_reqs looper.run( diff --git a/plenum/test/node_catchup_with_3pc/test_stashing_3pc_while_catchup_only_checkpoints.py b/plenum/test/node_catchup_with_3pc/test_stashing_3pc_while_catchup_only_checkpoints.py index b463c657d0..12eb0d8d4e 100644 --- a/plenum/test/node_catchup_with_3pc/test_stashing_3pc_while_catchup_only_checkpoints.py +++ b/plenum/test/node_catchup_with_3pc/test_stashing_3pc_while_catchup_only_checkpoints.py @@ -12,7 +12,7 @@ ppDelay, pDelay, cDelay, msg_rep_delay, cr_delay from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected -from plenum.test.helper import sdk_send_random_and_check, assertExp, max_3pc_batch_limits, \ +from plenum.test.helper import vdr_send_random_and_check, assertExp, max_3pc_batch_limits, \ check_last_ordered_3pc_on_all_replicas, check_last_ordered_3pc_on_master from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.stasher import delay_rules @@ -37,8 +37,8 @@ def test_3pc_while_catchup_with_chkpoints_only(tdir, tconf, reqs_for_checkpoint, testNodeClass, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, allPluginsPath): ''' Check that catch-up is not started again even if a quorum of stashed checkpoints @@ -51,8 +51,8 @@ def test_3pc_while_catchup_with_chkpoints_only(tdir, tconf, rest_nodes = txnPoolNodeSet[:-1] # Check that requests executed well - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) # Stop one node waitNodeDataEquality(looper, lagging_node, *rest_nodes) @@ -63,8 +63,8 @@ def test_3pc_while_catchup_with_chkpoints_only(tdir, tconf, looper.removeProdable(lagging_node) # Send more requests to active nodes - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) waitNodeDataEquality(looper, *rest_nodes) # Restart stopped node and wait for successful catch up @@ -102,8 +102,8 @@ def test_3pc_while_catchup_with_chkpoints_only(tdir, tconf, # stash enough stable checkpoints for starting a catch-up num_checkpoints = Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 num_reqs = reqs_for_checkpoint * num_checkpoints + 1 - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, num_reqs) looper.run( eventually(check_last_ordered_3pc_on_all_replicas, rest_nodes, diff --git a/plenum/test/node_catchup_with_3pc/test_stashing_checkpoints_after_view_change.py b/plenum/test/node_catchup_with_3pc/test_stashing_checkpoints_after_view_change.py index 7a61f7e08e..1ef9381c12 100644 --- a/plenum/test/node_catchup_with_3pc/test_stashing_checkpoints_after_view_change.py +++ b/plenum/test/node_catchup_with_3pc/test_stashing_checkpoints_after_view_change.py @@ -10,7 +10,7 @@ from plenum.test import waits from plenum.test.checkpoints.helper import 
check_for_nodes, check_stable_checkpoint, check_for_instance from plenum.test.delayers import lsDelay, nv_delay -from plenum.test.helper import sdk_send_random_and_check, assertExp, max_3pc_batch_limits, \ +from plenum.test.helper import vdr_send_random_and_check, assertExp, max_3pc_batch_limits, \ check_last_ordered_3pc_on_master, check_last_ordered_3pc_on_backup from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.stasher import delay_rules @@ -36,8 +36,8 @@ def test_checkpoints_after_view_change(tconf, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): ''' Tests that there is no infinite catchups if there is a quorum of stashed checkpoints received during the view change @@ -68,8 +68,8 @@ def test_checkpoints_after_view_change(tconf, # stash enough stable checkpoints for starting a catch-up num_checkpoints = Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 num_reqs = reqs_for_checkpoint * num_checkpoints + 1 - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, num_reqs) looper.run( eventually(check_last_ordered_3pc_on_master, rest_nodes, diff --git a/plenum/test/node_request/helper.py b/plenum/test/node_request/helper.py index c6f9b741e8..8d70dc0d09 100644 --- a/plenum/test/node_request/helper.py +++ b/plenum/test/node_request/helper.py @@ -1,6 +1,6 @@ import operator -from plenum.test.helper import sdk_send_batches_of_random_and_check +from plenum.test.helper import vdr_send_batches_of_random_and_check, sdk_send_batches_of_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data @@ -10,6 +10,16 @@ def nodes_by_rank(txnPoolNodeSet): key=operator.itemgetter(0))] +def vdr_ensure_pool_functional(looper, nodes, sdk_wallet, sdk_pool, + num_reqs=10, num_batches=2): + vdr_send_batches_of_random_and_check(looper, + nodes, + sdk_pool, + sdk_wallet, + num_reqs, + num_batches) + ensure_all_nodes_have_same_data(looper, nodes, custom_timeout=30) + def sdk_ensure_pool_functional(looper, nodes, sdk_wallet, sdk_pool, num_reqs=10, num_batches=2): sdk_send_batches_of_random_and_check(looper, diff --git a/plenum/test/node_request/message_request/conftest.py b/plenum/test/node_request/message_request/conftest.py index 6be4329ea3..d9b3c7d75a 100644 --- a/plenum/test/node_request/message_request/conftest.py +++ b/plenum/test/node_request/message_request/conftest.py @@ -2,11 +2,11 @@ from plenum.common.util import check_if_all_equal_in_list from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check @pytest.fixture(scope="module") -def teardown(request, looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle): +def teardown(request, looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle): def tear(): # Repair any broken network for node in txnPoolNodeSet: @@ -22,6 +22,6 @@ def tear(): for n in txnPoolNodeSet]) # Check the network is functional since all nodes reply - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 5) request.addfinalizer(tear) diff --git a/plenum/test/node_request/message_request/test_node_requests_missing_preprepare.py 
b/plenum/test/node_request/message_request/test_node_requests_missing_preprepare.py index c87fcdcda7..2fbede605b 100644 --- a/plenum/test/node_request/message_request/test_node_requests_missing_preprepare.py +++ b/plenum/test/node_request/message_request/test_node_requests_missing_preprepare.py @@ -4,7 +4,7 @@ from plenum.common.util import check_if_all_equal_in_list from plenum.test.delayers import ppDelay -from plenum.test.helper import sdk_send_batches_of_random_and_check +from plenum.test.helper import vdr_send_batches_of_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.node_request.message_request.helper import split_nodes from plenum.test.spy_helpers import get_count @@ -13,7 +13,7 @@ def test_node_requests_missing_preprepare(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle, + vdr_wallet_client, vdr_pool_handle, teardown): """ A node has bad network with primary and thus loses PRE-PREPARE, @@ -32,10 +32,10 @@ def test_node_requests_missing_preprepare(looper, txnPoolNodeSet, old_count_mrp = get_count(slow_node.master_replica._message_req_service, slow_node.master_replica._message_req_service.process_message_rep) - sdk_send_batches_of_random_and_check(looper, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, num_reqs=15, num_batches=5) diff --git a/plenum/test/node_request/message_request/test_node_requests_missing_preprepares_and_prepares.py b/plenum/test/node_request/message_request/test_node_requests_missing_preprepares_and_prepares.py index c10c16bd51..a40cdd0756 100644 --- a/plenum/test/node_request/message_request/test_node_requests_missing_preprepares_and_prepares.py +++ b/plenum/test/node_request/message_request/test_node_requests_missing_preprepares_and_prepares.py @@ -7,7 +7,7 @@ from plenum.test.waits import expectedPoolGetReadyTimeout from stp_core.common.log import getlogger from plenum.test.node_catchup.helper import waitNodeDataEquality -from plenum.test.helper import sdk_send_random_requests, sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_requests, vdr_send_random_and_check from stp_core.loop.eventually import eventually logger = getlogger() @@ -16,7 +16,7 @@ def test_node_requests_missing_preprepares_and_prepares( - looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, + looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle, tconf, tdir, allPluginsPath): """ 2 of 4 nodes go down (simulate this by dropping requests), so pool can not process any more incoming requests. 
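Note on the hunks above and below: the change is a mechanical rename of the sdk_* fixtures and helpers to their vdr_* counterparts, with the positional call shape preserved. A minimal sketch of the resulting test skeleton, assuming (as the call sites in this diff suggest) that the vdr_pool_handle / vdr_wallet_client fixtures come from conftest changes outside these hunks and that the vdr_* helpers in plenum/test/helper.py mirror the sdk_* signatures they replace:

# Sketch only (hypothetical test name); not part of this PR.
from plenum.test.helper import vdr_send_random_and_check, \
    vdr_send_batches_of_random_and_check


def test_vdr_rename_smoke(looper, txnPoolNodeSet,
                          vdr_pool_handle, vdr_wallet_client):
    # Same positional order as sdk_send_random_and_check:
    # (looper, nodes, pool handle, wallet, number of requests)
    vdr_send_random_and_check(looper, txnPoolNodeSet,
                              vdr_pool_handle, vdr_wallet_client, 5)
    # The batch variant keeps the num_reqs / num_batches keyword arguments
    vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         vdr_pool_handle, vdr_wallet_client,
                                         num_reqs=15, num_batches=5)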
@@ -33,15 +33,15 @@ def test_node_requests_missing_preprepares_and_prepares( alive_nodes = txnPoolNodeSet[:2] disconnected_nodes_stashers = [n.nodeIbStasher for n in disconnected_nodes] - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, INIT_REQS_CNT) init_ledger_size = txnPoolNodeSet[0].domainLedger.size with delay_rules_without_processing(disconnected_nodes_stashers, delay_3pc()): - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, MISSING_REQS_CNT) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, MISSING_REQS_CNT) last_ordered_key = txnPoolNodeSet[0].master_replica.last_ordered_3pc looper.run(eventually(check_pp_out_of_sync, alive_nodes, @@ -58,10 +58,10 @@ def test_node_requests_missing_preprepares_and_prepares( assert node.master_replica._ordering_service.spylog.count(OrderingService._request_prepare) == 0 assert node.master_replica._message_req_service.spylog.count(MessageReqService.process_message_rep) == 0 - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, REQS_AFTER_RECONNECT_CNT) waitNodeDataEquality(looper, disconnected_nodes[0], *txnPoolNodeSet[:-1]) diff --git a/plenum/test/node_request/message_request/test_node_requests_missing_preprepares_and_prepares_after_long_disconnection.py b/plenum/test/node_request/message_request/test_node_requests_missing_preprepares_and_prepares_after_long_disconnection.py index 52e58cf9fe..259e79f793 100644 --- a/plenum/test/node_request/message_request/test_node_requests_missing_preprepares_and_prepares_after_long_disconnection.py +++ b/plenum/test/node_request/message_request/test_node_requests_missing_preprepares_and_prepares_after_long_disconnection.py @@ -12,7 +12,7 @@ from plenum.test.waits import expectedPoolGetReadyTimeout from stp_core.loop.eventually import eventually from stp_core.common.log import getlogger -from plenum.test.helper import sdk_send_random_requests, sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_requests, vdr_send_random_and_check logger = getlogger() @@ -20,7 +20,7 @@ def test_node_requests_missing_preprepares_and_prepares_after_long_disconnection( - looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, + looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle, tconf, tdirWithPoolTxns, tdir, allPluginsPath): """ 2 of 4 nodes go down (simulate this by dropping requests), so pool can not process any more incoming requests. 
@@ -46,19 +46,19 @@ def test_node_requests_missing_preprepares_and_prepares_after_long_disconnection disconnected_nodes.append(node) disconnected_nodes_stashers = [n.nodeIbStasher for n in disconnected_nodes] - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, INIT_REQS_CNT) waitNodeDataEquality(looper, disconnected_nodes[0], *txnPoolNodeSet) init_ledger_size = txnPoolNodeSet[0].domainLedger.size with delay_rules_without_processing(disconnected_nodes_stashers, delay_3pc()): - sdk_send_random_requests(looper, - sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_requests(looper, + vdr_pool_handle, + vdr_wallet_client, MISSING_REQS_CNT) last_ordered_key = txnPoolNodeSet[0].master_replica.last_ordered_3pc looper.run(eventually(check_pp_out_of_sync, @@ -80,10 +80,10 @@ def test_node_requests_missing_preprepares_and_prepares_after_long_disconnection assert node.master_replica._ordering_service.spylog.count(OrderingService._request_prepare) == 0 assert node.master_replica._message_req_service.spylog.count(MessageReqService.process_message_rep) == 0 - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, REQS_AFTER_RECONNECT_CNT) waitNodeDataEquality(looper, disconnected_nodes[0], *txnPoolNodeSet) diff --git a/plenum/test/node_request/message_request/test_node_requests_missing_preprepares_prepares_and_commits.py b/plenum/test/node_request/message_request/test_node_requests_missing_preprepares_prepares_and_commits.py index 4671f3aa0a..4e3c6cc92b 100644 --- a/plenum/test/node_request/message_request/test_node_requests_missing_preprepares_prepares_and_commits.py +++ b/plenum/test/node_request/message_request/test_node_requests_missing_preprepares_prepares_and_commits.py @@ -14,7 +14,7 @@ from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected, reconnect_node_and_ensure_connected -from plenum.test.helper import sdk_send_random_requests, sdk_send_random_and_check, assertEquality +from plenum.test.helper import vdr_send_random_requests, vdr_send_random_and_check, assertEquality from stp_core.loop.eventually import eventually logger = getlogger() @@ -31,7 +31,7 @@ def tconf(tconf): def test_node_requests_missing_preprepares_prepares_and_commits( - looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, + looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle, tdir, allPluginsPath): """ 1 of 4 nodes goes down ((simulate this by dropping requests)). 
A new request comes in and is ordered by @@ -48,19 +48,19 @@ def test_node_requests_missing_preprepares_prepares_and_commits( alive_nodes = txnPoolNodeSet[:3] disconnected_node_stashers = disconnected_node.nodeIbStasher - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, INIT_REQS_CNT) init_ledger_size = txnPoolNodeSet[0].domainLedger.size with delay_rules_without_processing(disconnected_node_stashers, delay_3pc()): last_ordered_key = txnPoolNodeSet[0].master_replica.last_ordered_3pc - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, MISSING_REQS_CNT) looper.run(eventually(check_pp_out_of_sync, alive_nodes, @@ -82,10 +82,10 @@ def test_node_requests_missing_preprepares_prepares_and_commits( MessageReqService.process_message_rep) == 0 doOrderTimesBefore = ordering_service.spylog.count(OrderingService._do_order) - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, REQS_AFTER_RECONNECT_CNT) waitNodeDataEquality(looper, disconnected_node, *alive_nodes) diff --git a/plenum/test/node_request/message_request/test_preprepare_request.py b/plenum/test/node_request/message_request/test_preprepare_request.py index 275909944e..49c861d9bc 100644 --- a/plenum/test/node_request/message_request/test_preprepare_request.py +++ b/plenum/test/node_request/message_request/test_preprepare_request.py @@ -7,7 +7,7 @@ from plenum.test.spy_helpers import getAllReturnVals, get_count from stp_core.loop.eventually import eventually -from plenum.test.helper import sdk_send_batches_of_random_and_check +from plenum.test.helper import vdr_send_batches_of_random_and_check def count_requested_preprepare_resp(node): @@ -25,7 +25,7 @@ def count_requested_preprepare_req(node): def test_node_request_preprepare(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle, + vdr_wallet_client, vdr_pool_handle, teardown): """ Node requests PRE-PREPARE only once after getting PREPAREs. 
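The "requested only once" checks in this file rest on the spy helpers imported at the top of the hunk; only the request-driving helper changes here. For orientation, a rough sketch of a spy-based counter in the same style as the get_count call sites visible earlier in this diff — an illustrative assumption, since the bodies of count_requested_preprepare_req/resp are unchanged by this PR and not shown:

# Illustrative assumption only: a counter built the way the surrounding tests
# count message-request activity; the real count_requested_preprepare_* bodies
# are not part of this diff.
from plenum.test.spy_helpers import get_count


def count_message_rep_processed(node):
    # How many times the master replica's MessageReq service processed a reply
    svc = node.master_replica._message_req_service
    return get_count(svc, svc.process_message_rep)

Tests such as test_node_requests_missing_preprepare above take a count of this kind before and after driving traffic with vdr_send_batches_of_random_and_check and assert on the difference.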
@@ -36,10 +36,10 @@ def test_node_request_preprepare(looper, txnPoolNodeSet, slow_node.nodeIbStasher.delay(ppDelay(300, 0)) slow_node.nodeIbStasher.delay(pDelay(300, 0)) - sdk_send_batches_of_random_and_check(looper, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, num_reqs=10, num_batches=5) slow_node.nodeIbStasher.drop_delayeds() @@ -89,7 +89,7 @@ def chk(increase=True): def test_no_preprepare_requested(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle, + vdr_wallet_client, vdr_pool_handle, teardown): """ Node missing Propagates hence request not finalised, hence stashes @@ -102,10 +102,10 @@ def test_no_preprepare_requested(looper, txnPoolNodeSet, old_count_resp = count_requested_preprepare_resp(slow_node) - sdk_send_batches_of_random_and_check(looper, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, num_reqs=4, num_batches=2) diff --git a/plenum/test/node_request/message_request/test_request_ls_for_incorrect_ledger.py b/plenum/test/node_request/message_request/test_request_ls_for_incorrect_ledger.py index 880d36d3fb..5d9148ea46 100644 --- a/plenum/test/node_request/message_request/test_request_ls_for_incorrect_ledger.py +++ b/plenum/test/node_request/message_request/test_request_ls_for_incorrect_ledger.py @@ -1,11 +1,11 @@ from plenum.common.constants import DOMAIN_LEDGER_ID -from plenum.test.conftest import test_node +from plenum.test.conftest import vdr_test_node -def test_request_ls_for_incorrect_ledger(test_node): +def test_request_ls_for_incorrect_ledger(vdr_test_node): incorrect_ledger_id = 12345 correct_ledger_id = DOMAIN_LEDGER_ID - assert not test_node.getLedgerStatus(incorrect_ledger_id) + assert not vdr_test_node.getLedgerStatus(incorrect_ledger_id) # check that node build a ledger status for a correct ledger - assert test_node.getLedgerStatus(correct_ledger_id) + assert vdr_test_node.getLedgerStatus(correct_ledger_id) diff --git a/plenum/test/node_request/message_request/test_requested_preprepare_handling.py b/plenum/test/node_request/message_request/test_requested_preprepare_handling.py index 8af3642f1d..d86cdcd48c 100644 --- a/plenum/test/node_request/message_request/test_requested_preprepare_handling.py +++ b/plenum/test/node_request/message_request/test_requested_preprepare_handling.py @@ -11,11 +11,11 @@ from plenum.test.spy_helpers import get_count from stp_core.loop.eventually import eventually -from plenum.test.helper import sdk_send_batches_of_random_and_check +from plenum.test.helper import vdr_send_batches_of_random_and_check def test_handle_delayed_preprepares(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle, + vdr_wallet_client, vdr_pool_handle, teardown, monkeypatch): """ @@ -39,10 +39,10 @@ def patched_method(self, msg): # Delay PRE-PREPAREs by large amount simulating loss slow_node.nodeIbStasher.delay(ppDelay(300, 0)) - sdk_send_batches_of_random_and_check(looper, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, num_reqs=10, num_batches=5) waitNodeDataEquality(looper, slow_node, *other_nodes) diff --git a/plenum/test/node_request/node_request_helper.py b/plenum/test/node_request/node_request_helper.py index 77458bcf44..1758c493ca 100644 --- a/plenum/test/node_request/node_request_helper.py +++ b/plenum/test/node_request/node_request_helper.py @@ -1,3 +1,5 @@ 
+import json + from functools import partial from plenum.common.messages.node_messages import PrePrepare @@ -27,11 +29,18 @@ def g(node: TestNode): 2. no of propagate received by node must be greater than or equal to f + 1 """ - actualMsgs = len([x for x in - getAllArgs(node, Node.processPropagate) - if x['msg'].request[f.REQ_ID.nm] == request.reqId and - x['msg'].request[f.IDENTIFIER.nm] == request.identifier and - x['msg'].request[OPERATION] == request.operation]) + x = [] + for n in getAllArgs(node, Node.processPropagate): + if n['msg'].request[f.REQ_ID.nm] == json.loads(request[0][1][node.name])["result"]["txn"]["metadata"]["reqId"] and \ + n['msg'].request[f.IDENTIFIER.nm] == request[0][0]["identifier"] and \ + n['msg'].request[OPERATION] == {"amount":json.loads(request[0][1][node.name])["result"]["txn"]["data"]["amount"], "type":json.loads(request[0][1][node.name])["result"]["txn"]["type"]}: # operation here equals txn not op + x.append(n) + actualMsgs = len(x) + # actualMsgs = len([x for x in + # getAllArgs(node, Node.processPropagate) + # if x['msg'].request[f.REQ_ID.nm] == json.loads(request[0][1][node.master_primary_name])["result"]["txn"]["metadata"]["reqId"] and + # x['msg'].request[f.IDENTIFIER.nm] == request[0][0]["identifier"] and + # x['msg'].request[OPERATION] == json.loads(request[0][1][node.master_primary_name])["op"]]) numOfMsgsWithZFN = nodesSize - 1 numOfMsgsWithFaults = faultyNodes + 1 diff --git a/plenum/test/node_request/test_1_node_got_only_preprepare.py b/plenum/test/node_request/test_1_node_got_only_preprepare.py index 5403ec4420..17e8413a8e 100644 --- a/plenum/test/node_request/test_1_node_got_only_preprepare.py +++ b/plenum/test/node_request/test_1_node_got_only_preprepare.py @@ -3,7 +3,7 @@ from plenum.test.node_request.helper import nodes_last_ordered_equal from stp_core.loop.eventually import eventually -from plenum.test.helper import sdk_send_batches_of_random_and_check +from plenum.test.helper import vdr_send_batches_of_random_and_check from plenum.test.malicious_behaviors_node import dont_send_prepare_and_commit_to, reset_sending from plenum.test.checkpoints.conftest import chkFreqPatched @@ -19,8 +19,8 @@ def tconf(tconf): def test_1_node_get_only_preprepare(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf, chkFreqPatched): master_node = txnPoolNodeSet[0] @@ -29,8 +29,8 @@ def test_1_node_get_only_preprepare(looper, num_of_batches = 1 # Nodes order batches - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, num_of_batches, num_of_batches) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, num_of_batches, num_of_batches) assert behind_node.master_last_ordered_3PC == \ master_node.master_last_ordered_3PC @@ -38,8 +38,8 @@ def test_1_node_get_only_preprepare(looper, dont_send_prepare_and_commit_to(txnPoolNodeSet[:-1], behind_node.name) # Send some txns and behind_node cant order them while pool is working - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, num_of_batches, num_of_batches) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, num_of_batches, num_of_batches) # assert behind_node.master_last_ordered_3PC[1] + num_of_batches == \ # master_node.master_last_ordered_3PC[1] @@ -47,12 +47,12 @@ def test_1_node_get_only_preprepare(looper, reset_sending(txnPoolNodeSet[:-1]) # Send txns - 
sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, num_of_batches, num_of_batches) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, num_of_batches, num_of_batches) # After achieving stable checkpoint, behind_node start ordering - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, delta, delta) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, delta, delta) # Pool is working looper.run(eventually(nodes_last_ordered_equal, behind_node, master_node)) diff --git a/plenum/test/node_request/test_2_nodes_got_only_preprepare.py b/plenum/test/node_request/test_2_nodes_got_only_preprepare.py index 571535813c..bee9f3e935 100644 --- a/plenum/test/node_request/test_2_nodes_got_only_preprepare.py +++ b/plenum/test/node_request/test_2_nodes_got_only_preprepare.py @@ -5,7 +5,7 @@ from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.node_request.helper import nodes_last_ordered_equal -from plenum.test.helper import sdk_send_batches_of_random_and_check +from plenum.test.helper import vdr_send_batches_of_random_and_check from plenum.test.malicious_behaviors_node import dont_send_prepare_and_commit_to, reset_sending from plenum.test.checkpoints.conftest import chkFreqPatched @@ -21,8 +21,8 @@ def tconf(tconf): def test_2_nodes_get_only_preprepare(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf, chkFreqPatched): # CHK_FREQ = 2 in this test @@ -32,16 +32,16 @@ def test_2_nodes_get_only_preprepare(looper, behind_nodes = txnPoolNodeSet[-2:] # Nodes order batches - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1, 1) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1, 1) nodes_last_ordered_equal(*txnPoolNodeSet) # Emulate connection problems, 1st behind_node receiving only pre-prepares dont_send_prepare_and_commit_to(txnPoolNodeSet[:-2], behind_nodes[0].name) # Send some txns and 1st behind_node cant order them while pool is working - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1, 1) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1, 1) assert behind_nodes[0].master_last_ordered_3PC[1] + 1 == \ master_node.master_last_ordered_3PC[1] @@ -53,8 +53,8 @@ def test_2_nodes_get_only_preprepare(looper, reset_sending(txnPoolNodeSet[:-2]) # Send txns - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1, 1) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1, 1) # 1st behind_node is getting new prepares, but still can't order, # cause can't get quorum for prepare for previous batch @@ -65,8 +65,8 @@ def test_2_nodes_get_only_preprepare(looper, dont_send_prepare_and_commit_to(txnPoolNodeSet[:-2], behind_nodes[1].name) # Send some txns and 2nd behind_node cant order them while pool is working - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1, 1) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1, 1) assert behind_nodes[1].master_last_ordered_3PC[1] + 1 == \ master_node.master_last_ordered_3PC[1] @@ -82,8 +82,8 @@ def 
test_2_nodes_get_only_preprepare(looper, reset_sending(txnPoolNodeSet[:-2]) # Send txns - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1, 1) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1, 1) # 2nd behind_node is getting new prepares, but still can't order, # cause can't get quorum for prepare for previous batch @@ -91,7 +91,7 @@ def test_2_nodes_get_only_preprepare(looper, master_node.master_last_ordered_3PC[1] # After achieving stable checkpoint, behind_node start ordering - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1, 1) # 2d behind got another stashed checkpoint, so should catch-up now waitNodeDataEquality(looper, master_node, behind_nodes[1], customTimeout=60, diff --git a/plenum/test/node_request/test_already_processed_request.py b/plenum/test/node_request/test_already_processed_request.py index 43a483878e..d5554c1397 100644 --- a/plenum/test/node_request/test_already_processed_request.py +++ b/plenum/test/node_request/test_already_processed_request.py @@ -1,11 +1,11 @@ -from plenum.test.helper import sdk_send_random_and_check, \ - sdk_send_signed_requests, sdk_eval_timeout, \ - sdk_get_replies, sdk_check_reply, sdk_signed_random_requests +from plenum.test.helper import vdr_send_random_and_check, \ + vdr_send_signed_requests, vdr_eval_timeout, \ + vdr_get_replies, vdr_check_reply, vdr_signed_random_requests from plenum.test.spy_helpers import getAllReturnVals def test_already_processed_requests(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): """ Client re-sending request and checking that nodes picked the reply from ledger and did not process the request again @@ -40,14 +40,14 @@ def get_last_returned_val(): rpc1 = get_recordAndPropagate_call_count() # Request which will be send twice - reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 1) + reqs = vdr_signed_random_requests(looper, vdr_wallet_client, 1) # Send, check and getting reply from first request - sdk_reqs = sdk_send_signed_requests(sdk_pool_handle, reqs) - total_timeout = sdk_eval_timeout(len(sdk_reqs), len(txnPoolNodeSet)) - request1 = sdk_get_replies(looper, sdk_reqs, timeout=total_timeout) + sdk_reqs = vdr_send_signed_requests(vdr_pool_handle, reqs, looper) + total_timeout = vdr_eval_timeout(len(sdk_reqs), len(txnPoolNodeSet)) + request1 = vdr_get_replies(looper, sdk_reqs, timeout=total_timeout) for req_res in request1: - sdk_check_reply(req_res) + vdr_check_reply(req_res) first_req_id = request1[0][0]['reqId'] rlc2 = get_getReplyFromLedgerForRequest_call_count() @@ -58,7 +58,7 @@ def get_last_returned_val(): assert r1 is None # getReplyFromLedgerForRequest returned None since had not seen request # Request which we will send only once - request2 = sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + request2 = vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) second_req_id = request2[0][0]['reqId'] assert second_req_id != first_req_id @@ -73,9 +73,9 @@ def get_last_returned_val(): rep1 = request1[0][1]['result'] # Client re-sending first request - request3 = sdk_send_signed_requests(sdk_pool_handle, reqs) - total_timeout = sdk_eval_timeout(len(request3), len(txnPoolNodeSet)) - request3 = sdk_get_replies(looper, request3, 
timeout=total_timeout) + request3 = vdr_send_signed_requests(vdr_pool_handle, reqs, looper) + total_timeout = vdr_eval_timeout(len(request3), len(txnPoolNodeSet)) + request3 = vdr_get_replies(looper, request3, timeout=total_timeout) third_req_id = request3[0][0]['reqId'] assert third_req_id == first_req_id diff --git a/plenum/test/node_request/test_belated_request_not_processed.py b/plenum/test/node_request/test_belated_request_not_processed.py index cd2a9430c0..3dc87277d0 100644 --- a/plenum/test/node_request/test_belated_request_not_processed.py +++ b/plenum/test/node_request/test_belated_request_not_processed.py @@ -1,20 +1,20 @@ from plenum.test import waits from plenum.test.delayers import cDelay, req_delay, ppgDelay -from plenum.test.helper import sdk_signed_random_requests, \ - sdk_send_signed_requests, sdk_send_and_check +from plenum.test.helper import vdr_signed_random_requests, \ + vdr_send_signed_requests, vdr_send_and_check from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change.helper import ensure_view_change def test_repeated_request_not_processed_if_already_ordered( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size - one_req = sdk_signed_random_requests(looper, sdk_wallet_client, 1) - sdk_send_and_check(one_req, looper, txnPoolNodeSet, sdk_pool_handle) + one_req = vdr_signed_random_requests(looper, vdr_wallet_client, 1) + vdr_send_and_check(one_req, looper, txnPoolNodeSet, vdr_pool_handle) - sdk_send_signed_requests(sdk_pool_handle, one_req) + vdr_send_signed_requests(vdr_pool_handle, one_req, looper) looper.runFor(waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))) for node in txnPoolNodeSet: @@ -22,13 +22,13 @@ def test_repeated_request_not_processed_if_already_ordered( def test_belated_request_not_processed_if_already_ordered( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size delta.clientIbStasher.delay(req_delay(300)) - one_req = sdk_signed_random_requests(looper, sdk_wallet_client, 1) - sdk_send_and_check(one_req, looper, txnPoolNodeSet, sdk_pool_handle) + one_req = vdr_signed_random_requests(looper, vdr_wallet_client, 1) + vdr_send_and_check(one_req, looper, txnPoolNodeSet, vdr_pool_handle) delta.clientIbStasher.reset_delays_and_process_delayeds() looper.runFor(waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))) @@ -38,13 +38,13 @@ def test_belated_request_not_processed_if_already_ordered( def test_belated_propagate_not_processed_if_already_ordered( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size delta.nodeIbStasher.delay(ppgDelay(300, 'Gamma')) - one_req = sdk_signed_random_requests(looper, sdk_wallet_client, 1) - sdk_send_and_check(one_req, looper, txnPoolNodeSet, sdk_pool_handle) + one_req = vdr_signed_random_requests(looper, vdr_wallet_client, 1) + vdr_send_and_check(one_req, looper, txnPoolNodeSet, vdr_pool_handle) delta.nodeIbStasher.reset_delays_and_process_delayeds() looper.runFor(waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))) @@ -54,20 +54,20 @@ def test_belated_propagate_not_processed_if_already_ordered( def 
test_repeated_request_not_processed_if_already_in_3pc_process( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size for node in txnPoolNodeSet: node.nodeIbStasher.delay(cDelay(300)) - one_req = sdk_signed_random_requests(looper, sdk_wallet_client, 1) - sdk_send_signed_requests(sdk_pool_handle, one_req) + one_req = vdr_signed_random_requests(looper, vdr_wallet_client, 1) + vdr_send_signed_requests(vdr_pool_handle, one_req, looper) looper.runFor(waits.expectedPropagateTime(len(txnPoolNodeSet)) + waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + waits.expectedPrepareTime(len(txnPoolNodeSet)) + waits.expectedCommittedTime(len(txnPoolNodeSet))) - sdk_send_signed_requests(sdk_pool_handle, one_req) + vdr_send_signed_requests(vdr_pool_handle, one_req, looper) looper.runFor(waits.expectedPropagateTime(len(txnPoolNodeSet)) + waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + waits.expectedPrepareTime(len(txnPoolNodeSet)) + @@ -82,15 +82,15 @@ def test_repeated_request_not_processed_if_already_in_3pc_process( def test_belated_request_not_processed_if_already_in_3pc_process( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size delta.clientIbStasher.delay(req_delay(300)) for node in txnPoolNodeSet: node.nodeIbStasher.delay(cDelay(300)) - one_req = sdk_signed_random_requests(looper, sdk_wallet_client, 1) - sdk_send_signed_requests(sdk_pool_handle, one_req) + one_req = vdr_signed_random_requests(looper, vdr_wallet_client, 1) + vdr_send_signed_requests(vdr_pool_handle, one_req, looper) looper.runFor(waits.expectedPropagateTime(len(txnPoolNodeSet)) + waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + waits.expectedPrepareTime(len(txnPoolNodeSet)) + @@ -111,15 +111,15 @@ def test_belated_request_not_processed_if_already_in_3pc_process( def test_belated_propagate_not_processed_if_already_in_3pc_process( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size delta.nodeIbStasher.delay(ppgDelay(300, 'Gamma')) for node in txnPoolNodeSet: node.nodeIbStasher.delay(cDelay(300)) - one_req = sdk_signed_random_requests(looper, sdk_wallet_client, 1) - sdk_send_signed_requests(sdk_pool_handle, one_req) + one_req = vdr_signed_random_requests(looper, vdr_wallet_client, 1) + vdr_send_signed_requests(vdr_pool_handle, one_req, looper) looper.runFor(waits.expectedPropagateTime(len(txnPoolNodeSet)) + waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + waits.expectedPrepareTime(len(txnPoolNodeSet)) + @@ -140,17 +140,17 @@ def test_belated_propagate_not_processed_if_already_in_3pc_process( def test_repeated_request_not_processed_after_view_change( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size - one_req = sdk_signed_random_requests(looper, sdk_wallet_client, 1) - sdk_send_and_check(one_req, looper, txnPoolNodeSet, sdk_pool_handle) + one_req = vdr_signed_random_requests(looper, vdr_wallet_client, 1) + vdr_send_and_check(one_req, looper, txnPoolNodeSet, vdr_pool_handle) ensure_view_change(looper, txnPoolNodeSet) ensureElectionsDone(looper, 
txnPoolNodeSet) - sdk_send_signed_requests(sdk_pool_handle, one_req) + vdr_send_signed_requests(vdr_pool_handle, one_req, looper) looper.runFor(waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))) for node in txnPoolNodeSet: @@ -158,13 +158,13 @@ def test_repeated_request_not_processed_after_view_change( def test_belated_request_not_processed_after_view_change( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size delta.clientIbStasher.delay(req_delay(300)) - one_req = sdk_signed_random_requests(looper, sdk_wallet_client, 1) - sdk_send_and_check(one_req, looper, txnPoolNodeSet, sdk_pool_handle) + one_req = vdr_signed_random_requests(looper, vdr_wallet_client, 1) + vdr_send_and_check(one_req, looper, txnPoolNodeSet, vdr_pool_handle) ensure_view_change(looper, txnPoolNodeSet) ensureElectionsDone(looper, txnPoolNodeSet) @@ -177,13 +177,13 @@ def test_belated_request_not_processed_after_view_change( def test_belated_propagate_not_processed_after_view_change( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size delta.nodeIbStasher.delay(ppgDelay(300, 'Gamma')) - one_req = sdk_signed_random_requests(looper, sdk_wallet_client, 1) - sdk_send_and_check(one_req, looper, txnPoolNodeSet, sdk_pool_handle) + one_req = vdr_signed_random_requests(looper, vdr_wallet_client, 1) + vdr_send_and_check(one_req, looper, txnPoolNodeSet, vdr_pool_handle) ensure_view_change(looper, txnPoolNodeSet) ensureElectionsDone(looper, txnPoolNodeSet) diff --git a/plenum/test/node_request/test_commit/test_commits_dequeue_commits.py b/plenum/test/node_request/test_commit/test_commits_dequeue_commits.py index db3d80b314..f865f5b105 100644 --- a/plenum/test/node_request/test_commit/test_commits_dequeue_commits.py +++ b/plenum/test/node_request/test_commit/test_commits_dequeue_commits.py @@ -4,11 +4,11 @@ from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.stasher import delay_rules from plenum.test.test_node import getNonPrimaryReplicas -from plenum.test.helper import sdk_send_batches_of_random_and_check +from plenum.test.helper import vdr_send_batches_of_random_and_check def test_dequeue_and_validate_commits(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): slow_node = [r.node for r in getNonPrimaryReplicas(txnPoolNodeSet, 0)][-1] other_nodes = [n for n in txnPoolNodeSet if n != slow_node] delay = 50 @@ -17,10 +17,10 @@ def test_dequeue_and_validate_commits(looper, txnPoolNodeSet, msg_rep_delay(delay, [PREPARE, PREPREPARE])): with delay_rules(slow_node.nodeIbStasher, ppDelay(delay)): - sdk_send_batches_of_random_and_check(looper, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, num_reqs=1, num_batches=1) diff --git a/plenum/test/node_request/test_commit/test_commits_recvd_first.py b/plenum/test/node_request/test_commit/test_commits_recvd_first.py index 9f9db4ea1f..d789172df9 100644 --- a/plenum/test/node_request/test_commit/test_commits_recvd_first.py +++ b/plenum/test/node_request/test_commit/test_commits_recvd_first.py @@ -2,21 +2,21 @@ from plenum.test.delayers import ppDelay, pDelay from plenum.test.node_catchup.helper import waitNodeDataEquality from 
plenum.test.test_node import getNonPrimaryReplicas -from plenum.test.helper import sdk_send_batches_of_random_and_check +from plenum.test.helper import vdr_send_batches_of_random_and_check def test_commits_recvd_first(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): slow_node = [r.node for r in getNonPrimaryReplicas(txnPoolNodeSet, 0)][-1] other_nodes = [n for n in txnPoolNodeSet if n != slow_node] delay = 50 slow_node.nodeIbStasher.delay(ppDelay(delay, 0)) slow_node.nodeIbStasher.delay(pDelay(delay, 0)) - sdk_send_batches_of_random_and_check(looper, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, num_reqs=20, num_batches=4) diff --git a/plenum/test/node_request/test_commit/test_commits_without_prepares.py b/plenum/test/node_request/test_commit/test_commits_without_prepares.py index c0f7134203..9b24b65358 100644 --- a/plenum/test/node_request/test_commit/test_commits_without_prepares.py +++ b/plenum/test/node_request/test_commit/test_commits_without_prepares.py @@ -1,12 +1,12 @@ from plenum.test.delayers import pDelay from plenum.test.test_node import get_master_primary_node -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check def test_primary_receives_delayed_prepares(looper, txnPoolNodeSet, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_client, + vdr_pool_handle): """ Primary gets all PREPAREs after COMMITs """ @@ -15,10 +15,10 @@ def test_primary_receives_delayed_prepares(looper, txnPoolNodeSet, other_nodes = [n for n in txnPoolNodeSet if n != primary_node] primary_node.nodeIbStasher.delay(pDelay(delay, 0)) - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, count=10) for node in other_nodes: diff --git a/plenum/test/node_request/test_different_ledger_request_interleave.py b/plenum/test/node_request/test_different_ledger_request_interleave.py index 3290c34429..492558fd05 100644 --- a/plenum/test/node_request/test_different_ledger_request_interleave.py +++ b/plenum/test/node_request/test_different_ledger_request_interleave.py @@ -1,9 +1,9 @@ -from plenum.test.helper import sdk_send_random_and_check, sdk_send_random_requests, \ - sdk_eval_timeout, sdk_get_and_check_replies -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.helper import vdr_send_random_and_check, vdr_send_random_requests, \ + vdr_eval_timeout, vdr_get_and_check_replies +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.pool_transactions.helper import sdk_add_new_nym, \ - prepare_new_node_data, prepare_node_request, sdk_sign_and_send_prepared_request +from plenum.test.pool_transactions.helper import vdr_add_new_nym, \ + prepare_new_node_data, vdr_prepare_node_request, vdr_sign_and_send_prepared_request from plenum.test.test_node import checkProtocolInstanceSetup from plenum.test.view_change.helper import ensure_view_change @@ -17,47 +17,47 @@ def test_different_ledger_request_interleave(tconf, looper, txnPoolNodeSet, tdir, tdirWithPoolTxns, allPluginsPath, - sdk_pool_handle, sdk_wallet_client, - sdk_wallet_steward): + vdr_pool_handle, vdr_wallet_client, + vdr_wallet_steward): """ Send pool and domain ledger requests such that they interleave, 
and do view change in between and verify the pool is functional """ new_node = sdk_one_node_added - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 2) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) # Send domain ledger requests but don't wait for replies - requests = sdk_send_random_requests(looper, sdk_pool_handle, - sdk_wallet_client, 2) + requests = vdr_send_random_requests(looper, vdr_pool_handle, + vdr_wallet_client, 2) # Add another node by sending pool ledger request _, new_theta = sdk_node_theta_added(looper, txnPoolNodeSet, tdir, tconf, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, allPluginsPath, name='new_theta') # Send more domain ledger requests but don't wait for replies - requests.extend(sdk_send_random_requests(looper, sdk_pool_handle, - sdk_wallet_client, 3)) + requests.extend(vdr_send_random_requests(looper, vdr_pool_handle, + vdr_wallet_client, 3)) # Do view change without waiting for replies ensure_view_change(looper, nodes=txnPoolNodeSet) checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1) # Make sure all requests are completed - total_timeout = sdk_eval_timeout(len(requests), len(txnPoolNodeSet)) - sdk_get_and_check_replies(looper, requests, timeout=total_timeout) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle) - new_steward_wallet, steward_did = sdk_add_new_nym(looper, - sdk_pool_handle, - sdk_wallet_steward, + total_timeout = vdr_eval_timeout(len(requests), len(txnPoolNodeSet)) + vdr_get_and_check_replies(looper, requests, timeout=total_timeout) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, + vdr_wallet_client, vdr_pool_handle) + new_steward_wallet, steward_did = vdr_add_new_nym(looper, + vdr_pool_handle, + vdr_wallet_steward, 'another_ste', role='STEWARD') @@ -68,7 +68,7 @@ def test_different_ledger_request_interleave(tconf, looper, txnPoolNodeSet, sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \ prepare_new_node_data(tconf, tdir, next_node_name) node_req = looper.loop.run_until_complete( - prepare_node_request(steward_did, + vdr_prepare_node_request(steward_did, new_node_name=next_node_name, clientIp=clientIp, clientPort=clientPort, @@ -79,19 +79,19 @@ def test_different_ledger_request_interleave(tconf, looper, txnPoolNodeSet, key_proof=key_proof)) sdk_wallet = (new_steward_wallet, steward_did) - request_couple = sdk_sign_and_send_prepared_request(looper, sdk_wallet, - sdk_pool_handle, + request_couple = vdr_sign_and_send_prepared_request(looper, sdk_wallet, + vdr_pool_handle, node_req) # Send more domain ledger requests but don't wait for replies request_couples = [request_couple, * - sdk_send_random_requests(looper, sdk_pool_handle, - sdk_wallet_client, 5)] + vdr_send_random_requests(looper, vdr_pool_handle, + vdr_wallet_client, 5)] # Make sure all requests are completed - total_timeout = sdk_eval_timeout(len(request_couples), len(txnPoolNodeSet)) - sdk_get_and_check_replies(looper, request_couples, timeout=total_timeout) + total_timeout = vdr_eval_timeout(len(request_couples), len(txnPoolNodeSet)) + vdr_get_and_check_replies(looper, request_couples, timeout=total_timeout) # Make sure pool is functional - sdk_ensure_pool_functional(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, + vdr_wallet_client, vdr_pool_handle) diff --git 
a/plenum/test/node_request/test_discard_3pc_for_ordered.py b/plenum/test/node_request/test_discard_3pc_for_ordered.py index bb4e3a948f..6440c1b46a 100644 --- a/plenum/test/node_request/test_discard_3pc_for_ordered.py +++ b/plenum/test/node_request/test_discard_3pc_for_ordered.py @@ -7,7 +7,7 @@ chk_commits_prepares_recvd from plenum.test.test_node import getNonPrimaryReplicas from stp_core.loop.eventually import eventually -from plenum.test.helper import sdk_send_batches_of_random_and_check +from plenum.test.helper import vdr_send_batches_of_random_and_check @pytest.fixture(scope="module") @@ -19,7 +19,7 @@ def tconf(tconf): def test_discard_3PC_messages_for_already_ordered(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): """ Nodes discard any 3PC messages for already ordered 3PC keys (view_no, pp_seq_no). Delay all 3PC messages to a node so it cannot respond @@ -33,10 +33,10 @@ def test_discard_3PC_messages_for_already_ordered(looper, txnPoolNodeSet, delay_3pc_messages([slow_node], 1, delay) sent_batches = 3 - sdk_send_batches_of_random_and_check(looper, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, num_reqs=2 * sent_batches, num_batches=sent_batches) diff --git a/plenum/test/node_request/test_node_got_no_preprepare.py b/plenum/test/node_request/test_node_got_no_preprepare.py index 99e4f21d54..9427d2986b 100644 --- a/plenum/test/node_request/test_node_got_no_preprepare.py +++ b/plenum/test/node_request/test_node_got_no_preprepare.py @@ -3,7 +3,7 @@ from plenum.test.node_request.helper import nodes_last_ordered_equal from stp_core.loop.eventually import eventually -from plenum.test.helper import sdk_send_batches_of_random_and_check, sdk_send_batches_of_random +from plenum.test.helper import vdr_send_batches_of_random_and_check, vdr_send_batches_of_random from plenum.test.malicious_behaviors_node import router_dont_accept_messages_from, reset_router_accepting from plenum.test.checkpoints.conftest import chkFreqPatched @@ -18,8 +18,8 @@ def tconf(tconf): def test_1_node_got_no_preprepare(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf, chkFreqPatched): master_node = txnPoolNodeSet[0] @@ -28,8 +28,8 @@ def test_1_node_got_no_preprepare(looper, num_of_batches = 1 # Nodes order batches - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, num_of_batches, num_of_batches) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, num_of_batches, num_of_batches) assert behind_node.master_last_ordered_3PC == \ master_node.master_last_ordered_3PC @@ -37,8 +37,8 @@ def test_1_node_got_no_preprepare(looper, router_dont_accept_messages_from(behind_node, master_node.name) # Send some txns and behind_node cant order them while pool is working - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, num_of_batches, num_of_batches) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, num_of_batches, num_of_batches) with pytest.raises(AssertionError): nodes_last_ordered_equal(behind_node, master_node) @@ -49,8 +49,8 @@ def test_1_node_got_no_preprepare(looper, reset_router_accepting(behind_node) # Send txns - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 
num_of_batches, num_of_batches) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, num_of_batches, num_of_batches) # behind_node stashing new 3pc messages and not ordering and not participating in consensus assert len(behind_node.master_replica._ordering_service.prePreparesPendingPrevPP) == 1 @@ -58,8 +58,8 @@ def test_1_node_got_no_preprepare(looper, nodes_last_ordered_equal(behind_node, master_node) # After achieving stable checkpoint, behind_node start ordering - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, delta, delta) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, delta, delta) # Pool is working looper.run(eventually(nodes_last_ordered_equal, behind_node, master_node)) @@ -67,8 +67,8 @@ def test_1_node_got_no_preprepare(looper, def test_2_node_got_no_preprepare(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf, chkFreqPatched): master_node = txnPoolNodeSet[0] @@ -77,16 +77,16 @@ def test_2_node_got_no_preprepare(looper, num_of_batches = 1 # Nodes order batches - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, num_of_batches, num_of_batches) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, num_of_batches, num_of_batches) nodes_last_ordered_equal(*behind_nodes, master_node) # Emulate connection problems, behind_node doesnt receive pre-prepares router_dont_accept_messages_from(behind_nodes[0], master_node.name) # Send some txns and behind_node cant order them while pool is working - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, num_of_batches, num_of_batches) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, num_of_batches, num_of_batches) with pytest.raises(AssertionError): nodes_last_ordered_equal(behind_nodes[0], master_node) @@ -98,8 +98,8 @@ def test_2_node_got_no_preprepare(looper, reset_router_accepting(behind_nodes[0]) # Send txns - sdk_send_batches_of_random_and_check( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, num_of_batches, num_of_batches) + vdr_send_batches_of_random_and_check( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, num_of_batches, num_of_batches) # behind_node stashing new 3pc messages and not ordering and not participating in consensus assert len(behind_nodes[0].master_replica._ordering_service.prePreparesPendingPrevPP) == 1 @@ -110,8 +110,8 @@ def test_2_node_got_no_preprepare(looper, router_dont_accept_messages_from(behind_nodes[1], master_node.name) # Send some txns and behind_node cant order them while pool is working - sdk_send_batches_of_random( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, num_of_batches, num_of_batches) + vdr_send_batches_of_random( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, num_of_batches, num_of_batches) # Remove connection problems reset_router_accepting(behind_nodes[1]) @@ -120,12 +120,12 @@ def test_2_node_got_no_preprepare(looper, looper.run(eventually(nodes_last_ordered_equal, behind_nodes[1], master_node)) # Send txns - sdk_send_batches_of_random( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, num_of_batches, num_of_batches) + vdr_send_batches_of_random( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 
num_of_batches, num_of_batches) # After achieving stable checkpoint, behind_node start ordering - sdk_send_batches_of_random( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, delta, delta) + vdr_send_batches_of_random( + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, delta, delta) # Pool is working looper.run(eventually(nodes_last_ordered_equal, *behind_nodes, master_node)) diff --git a/plenum/test/node_request/test_order/test_ordering_when_pre_prepare_not_received.py b/plenum/test/node_request/test_order/test_ordering_when_pre_prepare_not_received.py index 5dc71fca13..071c5647ab 100644 --- a/plenum/test/node_request/test_order/test_ordering_when_pre_prepare_not_received.py +++ b/plenum/test/node_request/test_order/test_ordering_when_pre_prepare_not_received.py @@ -4,12 +4,12 @@ from plenum.test import waits from plenum.test.delayers import ppDelay, pDelay -from plenum.test.helper import sdk_send_random_request +from plenum.test.helper import vdr_send_random_request from plenum.test.test_node import getNonPrimaryReplicas def testOrderingWhenPrePrepareNotReceived(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): """ Send commits but delay pre-prepare and prepares such that enough commits are received, now the request should not be ordered until @@ -42,7 +42,7 @@ def patched_p(self, msg, sender): def chk1(): assert len(slow_rep._ordering_service.commitsWaitingForPrepare) > 0 - sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) + vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_client) timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + delay looper.run(eventually(chk1, retryWait=1, timeout=timeout)) diff --git a/plenum/test/node_request/test_order/test_request_ordering_1.py b/plenum/test/node_request/test_order/test_request_ordering_1.py index efb1ace35c..7c5578f556 100644 --- a/plenum/test/node_request/test_order/test_request_ordering_1.py +++ b/plenum/test/node_request/test_order/test_request_ordering_1.py @@ -1,12 +1,12 @@ import types from stp_core.loop.eventually import eventually -from plenum.test.helper import sdk_send_random_request +from plenum.test.helper import vdr_send_random_request from plenum.test.malicious_behaviors_node import delaysPrePrepareProcessing from plenum.test.test_node import getNonPrimaryReplicas -def testOrderingCase1(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle): +def testOrderingCase1(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle): """ Scenario -> PRE-PREPARE not received by the replica, Request not received for ordering by the replica, but received enough commits to start ordering. 
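Throughout this change set the pattern is mechanical: the sdk_* fixtures and helpers (sdk_pool_handle, sdk_wallet_client, sdk_send_signed_requests, ...) become vdr_* equivalents, and the send helpers now also take the looper, which is consistent with the indy-vdr calls being coroutines that the test has to drive explicitly (the same diff already drives vdr_prepare_node_request through looper.loop.run_until_complete). A minimal sketch of such a shim, under those assumptions, is shown below; the helper name matches what these tests import, but the body, and the assumption that the pool handle exposes an awaitable submit_request(), are illustrative only and not the actual code in plenum/test/helper.py.

def vdr_send_signed_requests(pool_handle, signed_reqs, looper):
    # Illustrative sketch only. Assumes pool_handle.submit_request(...) is a
    # coroutine (indy-vdr style), which is why the looper is now a parameter:
    # it owns the event loop used to drive the async submission.
    results = []
    for req in signed_reqs:
        reply = looper.loop.run_until_complete(pool_handle.submit_request(req))
        results.append((req, reply))
    return results

The (request, reply) couples presumably mirror what the old sdk helper returned, so the call sites in these hunks only change by the extra looper argument.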
@@ -31,7 +31,7 @@ def doNotProcessReqDigest(self, _): def chk(n): assert replica._ordering_service.spylog.count(replica._ordering_service._do_order.__name__) == n - sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) + vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_client) timeout = delay - 5 looper.run(eventually(chk, 0, retryWait=1, timeout=timeout)) timeout = delay + 5 diff --git a/plenum/test/node_request/test_order/test_request_ordering_2.py b/plenum/test/node_request/test_order/test_request_ordering_2.py index 7e9811b7a3..f881d39b98 100644 --- a/plenum/test/node_request/test_order/test_request_ordering_2.py +++ b/plenum/test/node_request/test_order/test_request_ordering_2.py @@ -2,7 +2,7 @@ from stp_core.loop.eventually import eventually from stp_core.common.log import getlogger from plenum.common.messages.node_messages import PrePrepare, Commit -from plenum.test.helper import sdk_send_random_requests, sdk_get_and_check_replies +from plenum.test.helper import vdr_send_random_requests, vdr_get_and_check_replies from plenum.test.test_node import getNonPrimaryReplicas, getPrimaryReplica from plenum.test import waits @@ -11,7 +11,7 @@ logger = getlogger() -def testOrderingCase2(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): +def testOrderingCase2(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): """ Scenario -> A client sends requests, some nodes delay COMMITs to few specific nodes such some nodes achieve commit quorum later for those @@ -61,11 +61,11 @@ def specificCommits(wrappedMsg): logger.debug('{} would be delaying commits'.format(node)) node.nodeIbStasher.delay(specificCommits) - sdk_reqs = sdk_send_random_requests(looper, sdk_pool_handle, - sdk_wallet_client, requestCount) + sdk_reqs = vdr_send_random_requests(looper, vdr_pool_handle, + vdr_wallet_client, requestCount) timeout = waits.expectedPoolGetReadyTimeout(len(txnPoolNodeSet)) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=timeout) - sdk_get_and_check_replies(looper, sdk_reqs) + vdr_get_and_check_replies(looper, sdk_reqs) diff --git a/plenum/test/node_request/test_pre_prepare/test_ignore_pre_prepare_pp_seq_no_less_than_expected.py b/plenum/test/node_request/test_pre_prepare/test_ignore_pre_prepare_pp_seq_no_less_than_expected.py index 105931ee57..1e5640b67b 100644 --- a/plenum/test/node_request/test_pre_prepare/test_ignore_pre_prepare_pp_seq_no_less_than_expected.py +++ b/plenum/test/node_request/test_pre_prepare/test_ignore_pre_prepare_pp_seq_no_less_than_expected.py @@ -1,13 +1,13 @@ import pytest -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.test_node import getNonPrimaryReplicas def test_ignore_pre_prepare_pp_seq_no_less_than_expected(looper, txnPoolNodeSet, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_client, + vdr_pool_handle): """ A node should NOT pend a pre-prepare request which has ppSeqNo less than expected. 
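The quorum-oriented scenarios in this module (delayed COMMITs in testOrderingCase2, the 6-node pools that cannot order with 2 nodes faulty or disconnected, the f+2-nodes-off tests further down) all rest on the usual BFT sizing Plenum uses: with n nodes the pool tolerates f = (n - 1) // 3 faults, and a replica orders a batch only after collecting COMMITs from n - f replicas. The snippet below only spells out that arithmetic as a sketch; it deliberately does not reproduce the attributes of Plenum's Quorums class.

def bft_quorums(n):
    # n >= 3f + 1 nodes are needed to tolerate f Byzantine faults.
    f = (n - 1) // 3
    # Ordering needs COMMITs from n - f replicas, so delaying COMMITs to a
    # replica (or disconnecting more than f nodes) stalls ordering there.
    return {'n': n, 'f': f, 'commit_quorum': n - f}

# Examples:
#   bft_quorums(4) -> {'n': 4, 'f': 1, 'commit_quorum': 3}
#   bft_quorums(6) -> {'n': 6, 'f': 1, 'commit_quorum': 5}
#   (a 6-node pool with 2 nodes down has only 4 replicas left and cannot order)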
@@ -19,10 +19,10 @@ def test_ignore_pre_prepare_pp_seq_no_less_than_expected(looper, replica = getNonPrimaryReplicas(txnPoolNodeSet, instId=0)[0] replica.last_ordered_3pc = (replica.viewNo, 10) - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, count=1) assert len(replica._ordering_service.prePreparesPendingPrevPP) == 0, \ "the pending request buffer is empty" diff --git a/plenum/test/node_request/test_pre_prepare/test_pp_obsolescence_check_fail_for_delayed.py b/plenum/test/node_request/test_pre_prepare/test_pp_obsolescence_check_fail_for_delayed.py index 10c9f1aca2..1f0a8601c4 100644 --- a/plenum/test/node_request/test_pre_prepare/test_pp_obsolescence_check_fail_for_delayed.py +++ b/plenum/test/node_request/test_pre_prepare/test_pp_obsolescence_check_fail_for_delayed.py @@ -3,7 +3,7 @@ from plenum.test import waits from plenum.test.delayers import ppDelay, pDelay, cDelay -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_request.test_timestamp.helper import get_timestamp_suspicion_count from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules @@ -29,8 +29,8 @@ def tconf(tconf): def test_pp_obsolescence_check_fail_for_delayed(tdir, tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): delay = PATCHED_ACCEPTABLE_DEVIATION_PREPREPARE_SECS + 1 lagging_node = txnPoolNodeSet[-1] @@ -40,8 +40,8 @@ def test_pp_obsolescence_check_fail_for_delayed(tdir, tconf, lagging_node.nodeIbStasher, ppDelay(), pDelay(), cDelay() ): # Order request on all nodes except lagging one - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) looper.run(asyncio.sleep(delay)) # Now delayed 3PC messages reach lagging node, so any delayed transactions diff --git a/plenum/test/node_request/test_pre_prepare/test_pp_obsolescence_check_pass_for_stashed.py b/plenum/test/node_request/test_pre_prepare/test_pp_obsolescence_check_pass_for_stashed.py index 49776f3f12..a8cd9331a0 100644 --- a/plenum/test/node_request/test_pre_prepare/test_pp_obsolescence_check_pass_for_stashed.py +++ b/plenum/test/node_request/test_pre_prepare/test_pp_obsolescence_check_pass_for_stashed.py @@ -5,7 +5,7 @@ from plenum.common.messages.node_messages import MessageRep, MessageReq, CatchupReq from plenum.server.catchup.node_leecher_service import NodeLeecherService from plenum.test.delayers import DEFAULT_DELAY -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_request.test_timestamp.helper import get_timestamp_suspicion_count from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules @@ -38,8 +38,8 @@ def delay(msg): def test_stashed_pp_pass_obsolescence_check(tdir, tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): lagging_node = txnPoolNodeSet[-1] def lagging_node_state() -> NodeLeecherService.State: @@ -47,8 +47,8 @@ def lagging_node_state() -> NodeLeecherService.State: # TODO INDY-2047: fills domain ledger with some requests # as a workaround for the issue - sdk_send_random_and_check(looper, 
txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) # Prevent lagging node from catching up domain ledger (and finishing catchup) with delay_rules(lagging_node.nodeIbStasher, delay_audit_ledger_catchup()): @@ -57,8 +57,8 @@ def lagging_node_state() -> NodeLeecherService.State: assert lagging_node_state() == NodeLeecherService.State.SyncingAudit # Order request on all nodes except lagging one where they goes to stashed state - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) # lagging node is still syncing Audit ledger assert lagging_node_state() == NodeLeecherService.State.SyncingAudit diff --git a/plenum/test/node_request/test_propagate/test_clean_verified_reqs.py b/plenum/test/node_request/test_propagate/test_clean_verified_reqs.py index 43935728a6..cf57920651 100644 --- a/plenum/test/node_request/test_propagate/test_clean_verified_reqs.py +++ b/plenum/test/node_request/test_propagate/test_clean_verified_reqs.py @@ -1,6 +1,6 @@ import pytest -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.spy_helpers import get_count from stp_core.loop.eventually import eventually @@ -21,18 +21,18 @@ def tconf(tconf): def test_clean_verified_reqs(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle): + vdr_wallet_steward, + vdr_pool_handle): """ As for now requests object is cleaned only after checkpoint stabilization, therefore need to forcing checkpoint sending""" def checkpoint_check(nodes): for node in nodes: assert get_count(node.master_replica._checkpointer, node.master_replica._checkpointer._mark_checkpoint_stable) > 0 - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, REQ_COUNT) looper.run(eventually(checkpoint_check, txnPoolNodeSet)) for node in txnPoolNodeSet: diff --git a/plenum/test/node_request/test_propagate/test_no_reauth.py b/plenum/test/node_request/test_propagate/test_no_reauth.py index 38852a4388..4895c487bf 100644 --- a/plenum/test/node_request/test_propagate/test_no_reauth.py +++ b/plenum/test/node_request/test_propagate/test_no_reauth.py @@ -1,6 +1,6 @@ import pytest -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.spy_helpers import get_count from stp_core.loop.eventually import eventually @@ -10,14 +10,14 @@ def test_no_reauth(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle): + vdr_wallet_steward, + vdr_pool_handle): auth_obj = txnPoolNodeSet[0].authNr(0).core_authenticator auth_count_before = get_count(auth_obj, auth_obj.authenticate) - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, REQ_COUNT) auth_count_after = get_count(auth_obj, auth_obj.authenticate) assert auth_count_after - auth_count_before == REQ_COUNT diff --git a/plenum/test/node_request/test_propagate/test_node_lacks_finalised_requests.py b/plenum/test/node_request/test_propagate/test_node_lacks_finalised_requests.py index d42b527367..2d10a59714 100644 --- 
a/plenum/test/node_request/test_propagate/test_node_lacks_finalised_requests.py +++ b/plenum/test/node_request/test_propagate/test_node_lacks_finalised_requests.py @@ -3,8 +3,8 @@ from plenum.test.node_request.test_propagate.helper import sum_of_request_propagates from plenum.test.spy_helpers import get_count, getAllReturnVals from plenum.test.test_node import getNonPrimaryReplicas -from plenum.test.helper import sdk_send_random_and_check -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.helper import vdr_send_random_and_check +from plenum.test.node_request.helper import vdr_ensure_pool_functional @pytest.fixture(scope="module") @@ -36,7 +36,7 @@ def setup(request, txnPoolNodeSet): def test_node_request_propagates(looper, setup, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle, tconf): + vdr_wallet_client, vdr_pool_handle, tconf): """ One of node lacks sufficient propagates """ @@ -53,10 +53,10 @@ def sum_of_sent_batches(): old_count_request_propagates = sum_of_request_propagates(faulty_node) sent_reqs = 1 - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, sent_reqs) looper.runFor(tconf.PROPAGATE_REQUEST_DELAY) @@ -78,8 +78,8 @@ def sum_of_sent_batches(): old_sum_of_sent_batches) faulty_node.nodeIbStasher.reset_delays_and_process_delayeds() - sdk_ensure_pool_functional(looper, + vdr_ensure_pool_functional(looper, txnPoolNodeSet, - sdk_wallet_client, - sdk_pool_handle, + vdr_wallet_client, + vdr_pool_handle, num_reqs=4) diff --git a/plenum/test/node_request/test_propagate/test_node_request_only_needed_propagates.py b/plenum/test/node_request/test_propagate/test_node_request_only_needed_propagates.py index 393b209b64..549ca23476 100644 --- a/plenum/test/node_request/test_propagate/test_node_request_only_needed_propagates.py +++ b/plenum/test/node_request/test_propagate/test_node_request_only_needed_propagates.py @@ -4,7 +4,7 @@ from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.server.quorums import Quorum -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.malicious_behaviors_node import dont_send_messages_to, dont_send_propagate from plenum.test.spy_helpers import get_count @@ -23,7 +23,7 @@ def setup(txnPoolNodeSet): def test_node_request_only_needed_propagates(looper, setup, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle, tconf): + vdr_wallet_client, vdr_pool_handle, tconf): """ One of node lacks sufficient propagates """ @@ -37,10 +37,10 @@ def test_node_request_only_needed_propagates(looper, setup, txnPoolNodeSet, old_count_prop_req_gamma = get_count(txnPoolNodeSet[2], txnPoolNodeSet[2].process_message_req) sent_reqs = 5 - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, sent_reqs) looper.runFor(delay * 1.5) propagates_count = len(txnPoolNodeSet) - 1 diff --git a/plenum/test/node_request/test_propagate/test_node_request_propagates_with_delay.py b/plenum/test/node_request/test_propagate/test_node_request_propagates_with_delay.py index ecb1e0aeb5..eaee05568f 100644 --- a/plenum/test/node_request/test_propagate/test_node_request_propagates_with_delay.py +++ b/plenum/test/node_request/test_propagate/test_node_request_propagates_with_delay.py @@ -1,5 +1,5 @@ import pytest -from plenum.test.helper 
import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.delayers import ppgDelay, req_delay from plenum.test.spy_helpers import get_count @@ -15,7 +15,7 @@ def setup(txnPoolNodeSet): def test_node_request_propagates_with_delay(looper, setup, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle, tconf): + vdr_wallet_client, vdr_pool_handle, tconf): """ One of node lacks sufficient propagates """ @@ -25,10 +25,10 @@ def test_node_request_propagates_with_delay(looper, setup, txnPoolNodeSet, old_count_recv_ppg = get_count(faulty_node, faulty_node.processPropagate) sent_reqs = 5 - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, sent_reqs) looper.runFor(delay / 4) diff --git a/plenum/test/node_request/test_quorum_disconnected.py b/plenum/test/node_request/test_quorum_disconnected.py index 5ad4bf729f..49239312a3 100644 --- a/plenum/test/node_request/test_quorum_disconnected.py +++ b/plenum/test/node_request/test_quorum_disconnected.py @@ -7,8 +7,8 @@ disconnect_node_and_ensure_disconnected, \ reconnect_node_and_ensure_connected from plenum.test.helper import check_request_is_not_returned_to_nodes, \ - sdk_send_and_check, sdk_json_to_request_object -from plenum.test.helper import sdk_signed_random_requests + vdr_send_and_check, vdr_json_to_request_object +from plenum.test.helper import vdr_signed_random_requests nodeCount = 6 # f + 1 faults, i.e, num of faults greater than system can tolerate @@ -18,8 +18,8 @@ def test_6_nodes_pool_cannot_reach_quorum_with_2_disconnected( - txnPoolNodeSet, looper, sdk_pool_handle, - sdk_wallet_client): + txnPoolNodeSet, looper, vdr_pool_handle, + vdr_wallet_client): ''' Check that we can not reach consensus when more than n-f nodes are disconnected: disconnect 2 of 6 nodes @@ -34,11 +34,11 @@ def test_6_nodes_pool_cannot_reach_quorum_with_2_disconnected( looper, current_node_set, node, stopNode=False) current_node_set.remove(node) - reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 1) + reqs = vdr_signed_random_requests(looper, vdr_wallet_client, 1) with pytest.raises(PoolLedgerTimeoutException): - sdk_send_and_check(reqs, looper, txnPoolNodeSet, sdk_pool_handle) + vdr_send_and_check(reqs, looper, txnPoolNodeSet, vdr_pool_handle) check_request_is_not_returned_to_nodes( - txnPoolNodeSet, sdk_json_to_request_object(json.loads(reqs[0]))) + txnPoolNodeSet, vdr_json_to_request_object(json.loads(reqs[0]))) # The following reconnection of nodes is needed in this test to avoid # pytest process hangup diff --git a/plenum/test/node_request/test_quorum_f_plus_2_nodes_but_not_primary_off_and_on.py b/plenum/test/node_request/test_quorum_f_plus_2_nodes_but_not_primary_off_and_on.py index 2962be2092..89dfe71d23 100644 --- a/plenum/test/node_request/test_quorum_f_plus_2_nodes_but_not_primary_off_and_on.py +++ b/plenum/test/node_request/test_quorum_f_plus_2_nodes_but_not_primary_off_and_on.py @@ -3,8 +3,8 @@ from plenum.common.exceptions import PoolLedgerTimeoutException from plenum.test import waits from plenum.test.helper import checkViewNoForNodes, \ - sdk_send_random_and_check, sdk_send_random_requests, sdk_get_replies, \ - sdk_check_reply + vdr_send_random_and_check, vdr_send_random_requests, vdr_get_replies, \ + vdr_check_reply from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected from plenum.test.test_node import ensureElectionsDone, getRequiredInstances @@ 
-25,12 +25,12 @@ def stop_node(node_to_stop, looper, pool_nodes): def test_quorum_after_f_plus_2_nodes_but_not_primary_turned_off_and_later_on( looper, allPluginsPath, tdir, tconf, - txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): + txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): nodes = txnPoolNodeSet - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, 1) stop_node(nodes[4], looper, nodes) @@ -38,9 +38,9 @@ def test_quorum_after_f_plus_2_nodes_but_not_primary_turned_off_and_later_on( waits.expectedPoolElectionTimeout(len(nodes))) checkViewNoForNodes(nodes[:4], expectedViewNo=0) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, 1) stop_node(nodes[3], looper, nodes) @@ -48,56 +48,56 @@ def test_quorum_after_f_plus_2_nodes_but_not_primary_turned_off_and_later_on( waits.expectedPoolElectionTimeout(len(nodes))) checkViewNoForNodes(nodes[:3], expectedViewNo=0) - sdk_reqs3 = sdk_send_random_requests(looper, - sdk_pool_handle, - sdk_wallet_client, + sdk_reqs3 = vdr_send_random_requests(looper, + vdr_pool_handle, + vdr_wallet_client, 1) with pytest.raises(PoolLedgerTimeoutException): - req_res = sdk_get_replies(looper, sdk_reqs3) - sdk_check_reply(req_res[0]) + req_res = vdr_get_replies(looper, sdk_reqs3) + vdr_check_reply(req_res[0]) stop_node(nodes[2], looper, nodes) looper.runFor(tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(nodes))) checkViewNoForNodes(nodes[:2], expectedViewNo=0) - sdk_reqs4 = sdk_send_random_requests(looper, - sdk_pool_handle, - sdk_wallet_client, + sdk_reqs4 = vdr_send_random_requests(looper, + vdr_pool_handle, + vdr_wallet_client, 1) with pytest.raises(PoolLedgerTimeoutException): - req_res = sdk_get_replies(looper, sdk_reqs4) - sdk_check_reply(req_res[0]) + req_res = vdr_get_replies(looper, sdk_reqs4) + vdr_check_reply(req_res[0]) nodes[4] = start_stopped_node(nodes[4], looper, tconf, tdir, allPluginsPath) looper.runFor(waits.expectedPoolElectionTimeout(len(nodes))) checkViewNoForNodes(nodes[:2] + nodes[4:], expectedViewNo=0) - sdk_reqs5 = sdk_send_random_requests(looper, - sdk_pool_handle, - sdk_wallet_client, + sdk_reqs5 = vdr_send_random_requests(looper, + vdr_pool_handle, + vdr_wallet_client, 1) with pytest.raises(PoolLedgerTimeoutException): - req_res = sdk_get_replies(looper, sdk_reqs5) - sdk_check_reply(req_res[0]) + req_res = vdr_get_replies(looper, sdk_reqs5) + vdr_check_reply(req_res[0]) nodes[3] = start_stopped_node(nodes[3], looper, tconf, tdir, allPluginsPath) ensureElectionsDone(looper, nodes[:2] + nodes[3:], instances_list=range(getRequiredInstances(nodeCount))) checkViewNoForNodes(nodes[:2] + nodes[3:], expectedViewNo=0) - sdk_reqs6 = sdk_send_random_requests(looper, - sdk_pool_handle, - sdk_wallet_client, + sdk_reqs6 = vdr_send_random_requests(looper, + vdr_pool_handle, + vdr_wallet_client, 1) - sdk_get_replies(looper, sdk_reqs6) + vdr_get_replies(looper, sdk_reqs6) nodes[2] = start_stopped_node(nodes[2], looper, tconf, tdir, allPluginsPath) ensureElectionsDone(looper, nodes, instances_list=range(getRequiredInstances(nodeCount))) checkViewNoForNodes(nodes, expectedViewNo=0) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, 
1) diff --git a/plenum/test/node_request/test_quorum_f_plus_2_nodes_including_primary_off_and_on.py b/plenum/test/node_request/test_quorum_f_plus_2_nodes_including_primary_off_and_on.py index 7685810d71..e4b67f6f87 100644 --- a/plenum/test/node_request/test_quorum_f_plus_2_nodes_including_primary_off_and_on.py +++ b/plenum/test/node_request/test_quorum_f_plus_2_nodes_including_primary_off_and_on.py @@ -3,8 +3,8 @@ from plenum.common.exceptions import PoolLedgerTimeoutException from plenum.test import waits from plenum.test.helper import waitForViewChange, checkViewNoForNodes, \ - sdk_send_random_and_check, sdk_send_random_requests, sdk_get_replies, \ - sdk_check_reply, sdk_eval_timeout + vdr_send_random_and_check, vdr_send_random_requests, vdr_get_replies, \ + vdr_check_reply, vdr_eval_timeout from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected from plenum.test.test_node import ensureElectionsDone, getRequiredInstances @@ -25,14 +25,14 @@ def stop_node(node_to_stop, looper, pool_nodes): def test_quorum_after_f_plus_2_nodes_including_primary_turned_off_and_later_on( looper, allPluginsPath, tdir, tconf, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): - timeout = sdk_eval_timeout(1, len(txnPoolNodeSet)) + vdr_pool_handle, + vdr_wallet_client): + timeout = vdr_eval_timeout(1, len(txnPoolNodeSet)) nodes = txnPoolNodeSet - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, 1) stop_node(nodes[0], looper, nodes) @@ -40,9 +40,9 @@ def test_quorum_after_f_plus_2_nodes_including_primary_turned_off_and_later_on( ensureElectionsDone(looper, nodes[1:], instances_list=range(getRequiredInstances(nodeCount))) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, 1) stop_node(nodes[1], looper, nodes) @@ -50,38 +50,38 @@ def test_quorum_after_f_plus_2_nodes_including_primary_turned_off_and_later_on( waits.expectedPoolElectionTimeout(len(nodes))) checkViewNoForNodes(nodes[2:], expectedViewNo=1) - sdk_reqs3 = sdk_send_random_requests(looper, - sdk_pool_handle, - sdk_wallet_client, + sdk_reqs3 = vdr_send_random_requests(looper, + vdr_pool_handle, + vdr_wallet_client, 1) with pytest.raises(PoolLedgerTimeoutException): - req_res = sdk_get_replies(looper, sdk_reqs3, timeout=timeout) - sdk_check_reply(req_res[0]) + req_res = vdr_get_replies(looper, sdk_reqs3, timeout=timeout) + vdr_check_reply(req_res[0]) stop_node(nodes[2], looper, nodes) looper.runFor(tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(nodes))) checkViewNoForNodes(nodes[3:], expectedViewNo=1) - sdk_reqs4 = sdk_send_random_requests(looper, - sdk_pool_handle, - sdk_wallet_client, + sdk_reqs4 = vdr_send_random_requests(looper, + vdr_pool_handle, + vdr_wallet_client, 1) with pytest.raises(PoolLedgerTimeoutException): - req_res = sdk_get_replies(looper, sdk_reqs4, timeout=timeout) - sdk_check_reply(req_res[0]) + req_res = vdr_get_replies(looper, sdk_reqs4, timeout=timeout) + vdr_check_reply(req_res[0]) nodes[2] = start_stopped_node(nodes[2], looper, tconf, tdir, allPluginsPath) looper.runFor(waits.expectedPoolElectionTimeout(len(nodes))) checkViewNoForNodes(nodes[3:], expectedViewNo=1) - sdk_reqs5 = sdk_send_random_requests(looper, - sdk_pool_handle, - sdk_wallet_client, + sdk_reqs5 = vdr_send_random_requests(looper, + 
vdr_pool_handle, + vdr_wallet_client, 1) with pytest.raises(PoolLedgerTimeoutException): - req_res = sdk_get_replies(looper, sdk_reqs5, timeout=timeout) - sdk_check_reply(req_res[0]) + req_res = vdr_get_replies(looper, sdk_reqs5, timeout=timeout) + vdr_check_reply(req_res[0]) nodes[1] = start_stopped_node(nodes[1], looper, tconf, tdir, allPluginsPath) ensureElectionsDone(looper, nodes[1:], @@ -89,9 +89,9 @@ def test_quorum_after_f_plus_2_nodes_including_primary_turned_off_and_later_on( customTimeout=60) checkViewNoForNodes(nodes[1:], expectedViewNo=1) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, 1) nodes[0] = start_stopped_node(nodes[0], looper, tconf, tdir, allPluginsPath) @@ -100,7 +100,7 @@ def test_quorum_after_f_plus_2_nodes_including_primary_turned_off_and_later_on( customTimeout=60) checkViewNoForNodes(nodes, expectedViewNo=1) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, 1) diff --git a/plenum/test/node_request/test_quorum_faulty.py b/plenum/test/node_request/test_quorum_faulty.py index 220aa15b75..456a82d005 100644 --- a/plenum/test/node_request/test_quorum_faulty.py +++ b/plenum/test/node_request/test_quorum_faulty.py @@ -7,7 +7,7 @@ from plenum.test.node_request.helper import nodes_by_rank from stp_core.common.util import adict from plenum.test.helper import check_request_is_not_returned_to_nodes, \ - sdk_send_and_check, sdk_json_to_request_object, sdk_signed_random_requests + vdr_send_and_check, vdr_json_to_request_object, vdr_signed_random_requests from plenum.test.malicious_behaviors_node import makeNodeFaulty, \ delaysPrePrepareProcessing, \ changesRequest @@ -39,9 +39,9 @@ def afterElection(setup): def test_6_nodes_pool_cannot_reach_quorum_with_2_faulty(afterElection, looper, txnPoolNodeSet, prepared1, - sdk_wallet_client, sdk_pool_handle): - reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 1) + vdr_wallet_client, vdr_pool_handle): + reqs = vdr_signed_random_requests(looper, vdr_wallet_client, 1) with pytest.raises(PoolLedgerTimeoutException): - sdk_send_and_check(reqs, looper, txnPoolNodeSet, sdk_pool_handle) + vdr_send_and_check(reqs, looper, txnPoolNodeSet, vdr_pool_handle) check_request_is_not_returned_to_nodes( - txnPoolNodeSet, sdk_json_to_request_object(json.loads(reqs[0]))) + txnPoolNodeSet, vdr_json_to_request_object(json.loads(reqs[0]))) diff --git a/plenum/test/node_request/test_reply_from_ledger_for_request.py b/plenum/test/node_request/test_reply_from_ledger_for_request.py index 2459c7fb9d..a50e82acfa 100644 --- a/plenum/test/node_request/test_reply_from_ledger_for_request.py +++ b/plenum/test/node_request/test_reply_from_ledger_for_request.py @@ -7,7 +7,8 @@ from plenum.common.request import Request from plenum.common.txn_util import reqToTxn from plenum.common.types import f, OPERATION -from plenum.test.helper import sdk_random_request_objects, sdk_sign_request_objects, sdk_multisign_request_object +from plenum.test.helper import vdr_random_request_objects, vdr_sign_request_objects, vdr_multisign_request_object +from indy_vdr import request @pytest.fixture @@ -36,13 +37,15 @@ def deserialize_req(req): protocolVersion=req.get(f.PROTOCOL_VERSION.nm, None) ) req = Request(**kwargs) + if isinstance(req, request.Request): + req = req return req -def 
test_seq_no_db_signed_request(looper, node, sdk_wallet_client): +def test_seq_no_db_signed_request(looper, node, vdr_wallet_client): # Create signed request and write it to ledger - req = sdk_random_request_objects(1, identifier=sdk_wallet_client[1], protocol_version=CURRENT_PROTOCOL_VERSION)[0] - req = sdk_sign_request_objects(looper, sdk_wallet_client, [req])[0] + req = vdr_random_request_objects(1, identifier=vdr_wallet_client[1], protocol_version=CURRENT_PROTOCOL_VERSION)[0] + req = vdr_sign_request_objects(looper, vdr_wallet_client, [req])[0] req = deserialize_req(req) write_request(node, req) @@ -51,10 +54,10 @@ def test_seq_no_db_signed_request(looper, node, sdk_wallet_client): assert isinstance(rep, Reply) -def test_seq_no_db_multisigned_request(looper, node, sdk_wallet_client, sdk_wallet_client2): +def test_seq_no_db_multisigned_request(looper, node, vdr_wallet_client, vdr_wallet_client2): # Create signed request and write it to ledger - req = sdk_random_request_objects(1, identifier=sdk_wallet_client[1], protocol_version=CURRENT_PROTOCOL_VERSION)[0] - req = sdk_multisign_request_object(looper, sdk_wallet_client, json.dumps(req.as_dict)) + req = vdr_random_request_objects(1, identifier=vdr_wallet_client[1], protocol_version=CURRENT_PROTOCOL_VERSION)[0] + req = vdr_multisign_request_object(looper, vdr_wallet_client, json.dumps(req.as_dict)) req = deserialize_req(req) write_request(node, req) @@ -63,15 +66,15 @@ def test_seq_no_db_multisigned_request(looper, node, sdk_wallet_client, sdk_wall assert isinstance(rep, Reply) # Make sure sending request with additional signature will return NACK - multisig_req = sdk_multisign_request_object(looper, sdk_wallet_client2, json.dumps(req.as_dict)) + multisig_req = vdr_multisign_request_object(looper, vdr_wallet_client2, json.dumps(req.as_dict)) multisig_req = deserialize_req(multisig_req) rep = node.getReplyFromLedgerForRequest(multisig_req) assert isinstance(rep, RequestNack) -def test_seq_no_db_unsigned_request(looper, node, sdk_wallet_client): +def test_seq_no_db_unsigned_request(looper, node, vdr_wallet_client): # Create unsigned request and write it to ledger - req = sdk_random_request_objects(1, identifier=sdk_wallet_client[1], protocol_version=CURRENT_PROTOCOL_VERSION)[0] + req = vdr_random_request_objects(1, identifier=vdr_wallet_client[1], protocol_version=CURRENT_PROTOCOL_VERSION)[0] write_request(node, req) # Make sure sending request again will return REPLY @@ -79,7 +82,7 @@ def test_seq_no_db_unsigned_request(looper, node, sdk_wallet_client): assert isinstance(rep, Reply) # Make sure sending request with signature will return NACK - signed_req = sdk_sign_request_objects(looper, sdk_wallet_client, [req])[0] + signed_req = vdr_sign_request_objects(looper, vdr_wallet_client, [req])[0] signed_req = deserialize_req(signed_req) rep = node.getReplyFromLedgerForRequest(signed_req) assert isinstance(rep, RequestNack) diff --git a/plenum/test/node_request/test_request_forwarding.py b/plenum/test/node_request/test_request_forwarding.py index 319c770767..544fd98dca 100644 --- a/plenum/test/node_request/test_request_forwarding.py +++ b/plenum/test/node_request/test_request_forwarding.py @@ -3,16 +3,16 @@ from plenum.test.batching_3pc.conftest import tconf from stp_core.loop.eventually import eventually from plenum.test.view_change.conftest import perf_chk_patched -from plenum.test.helper import sdk_send_signed_requests, sdk_get_replies, \ - sdk_signed_random_requests, sdk_eval_timeout +from plenum.test.helper import vdr_send_signed_requests, 
vdr_get_replies, \ + vdr_signed_random_requests, vdr_eval_timeout def test_all_replicas_hold_request_keys( perf_chk_patched, looper, txnPoolNodeSet, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_client, + vdr_pool_handle): """ All replicas, whether primary or non-primary, hold request keys of forwarded requests. Once requests are ordered, their request keys are removed from the replica. @@ -35,13 +35,13 @@ def chk(count): elif r.isPrimary is True: assert len(r._ordering_service.requestQueues[DOMAIN_LEDGER_ID]) == 0 - reqs = sdk_signed_random_requests(looper, - sdk_wallet_client, + reqs = vdr_signed_random_requests(looper, + vdr_wallet_client, tconf.Max3PCBatchSize - 1) - req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs) + req_resps = vdr_send_signed_requests(vdr_pool_handle, reqs, looper) # Only non primary replicas should have all request keys with them looper.run(eventually(chk, tconf.Max3PCBatchSize - 1)) - sdk_get_replies(looper, req_resps, timeout=sdk_eval_timeout( + vdr_get_replies(looper, req_resps, timeout=vdr_eval_timeout( tconf.Max3PCBatchSize - 1, len(txnPoolNodeSet), add_delay_to_timeout=delay_3pc)) # Replicas should have no request keys with them since they are ordered diff --git a/plenum/test/node_request/test_send_node_with_invalid_verkey.py b/plenum/test/node_request/test_send_node_with_invalid_verkey.py index 8a2f90e3cf..74ec68482a 100644 --- a/plenum/test/node_request/test_send_node_with_invalid_verkey.py +++ b/plenum/test/node_request/test_send_node_with_invalid_verkey.py @@ -6,20 +6,20 @@ from plenum.common.constants import STEWARD_STRING, VALIDATOR, VERKEY from plenum.common.exceptions import RequestNackedException from plenum.common.util import randomString -from plenum.test.helper import sdk_get_bad_response -from plenum.test.pool_transactions.helper import sdk_add_new_nym, prepare_new_node_data, prepare_node_request, \ - sdk_sign_and_send_prepared_request, sdk_change_node_keys +from plenum.test.helper import vdr_get_bad_response +from plenum.test.pool_transactions.helper import vdr_add_new_nym, prepare_new_node_data, vdr_prepare_node_request, \ + vdr_sign_and_send_prepared_request, vdr_change_node_keys invalid_dest = 'a' * 43 -def test_send_node_with_invalid_dest_verkey(looper, sdk_pool_handle, - sdk_wallet_steward, tdir, tconf): +def test_send_node_with_invalid_dest_verkey(looper, vdr_pool_handle, + vdr_wallet_steward, tdir, tconf): node_name = "Psi" new_steward_name = "testClientSteward" + randomString(3) - new_steward_wallet_handle = sdk_add_new_nym(looper, - sdk_pool_handle, - sdk_wallet_steward, + new_steward_wallet_handle = vdr_add_new_nym(looper, + vdr_pool_handle, + vdr_wallet_steward, alias=new_steward_name, role=STEWARD_STRING) sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \ @@ -30,7 +30,7 @@ def test_send_node_with_invalid_dest_verkey(looper, sdk_pool_handle, _, steward_did = new_steward_wallet_handle node_request = looper.loop.run_until_complete( - prepare_node_request(steward_did, + vdr_prepare_node_request(steward_did, new_node_name=node_name, clientIp=clientIp, clientPort=clientPort, @@ -41,13 +41,13 @@ services=[VALIDATOR], key_proof=key_proof)) - request_couple = sdk_sign_and_send_prepared_request(looper, new_steward_wallet_handle, - sdk_pool_handle, node_request) - sdk_get_bad_response(looper, [request_couple], RequestNackedException, + request_couple = vdr_sign_and_send_prepared_request(looper, new_steward_wallet_handle, +
vdr_pool_handle, node_request) + vdr_get_bad_response(looper, [request_couple], RequestNackedException, 'Node\'s dest is not correct Ed25519 key.') node_request = looper.loop.run_until_complete( - prepare_node_request(steward_did, + vdr_prepare_node_request(steward_did, new_node_name=node_name, clientIp=clientIp, clientPort=clientPort, @@ -62,13 +62,13 @@ def test_send_node_with_invalid_dest_verkey(looper, sdk_pool_handle, node_request['operation'][VERKEY] = invalid_dest node_request = json.dumps(node_request) - request_couple = sdk_sign_and_send_prepared_request(looper, new_steward_wallet_handle, - sdk_pool_handle, node_request) - sdk_get_bad_response(looper, [request_couple], RequestNackedException, + request_couple = vdr_sign_and_send_prepared_request(looper, new_steward_wallet_handle, + vdr_pool_handle, node_request) + vdr_get_bad_response(looper, [request_couple], RequestNackedException, 'Node\'s verkey is not correct Ed25519 key.') -def test_edit_node_with_invalid_verkey(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward): +def test_edit_node_with_invalid_verkey(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward): with pytest.raises(RequestNackedException) as e: - sdk_change_node_keys(looper, txnPoolNodeSet[0], sdk_wallet_steward, sdk_pool_handle, invalid_dest) + vdr_change_node_keys(looper, txnPoolNodeSet[0], vdr_wallet_steward, vdr_pool_handle, invalid_dest) e.match('Node\'s verkey is not correct Ed25519 key.') diff --git a/plenum/test/node_request/test_setup_for_non_master.py b/plenum/test/node_request/test_setup_for_non_master.py index 77fab2eb64..a8691eabdc 100644 --- a/plenum/test/node_request/test_setup_for_non_master.py +++ b/plenum/test/node_request/test_setup_for_non_master.py @@ -1,9 +1,9 @@ import pytest from plenum.common.util import compare_3PC_keys -from plenum.test.helper import sdk_send_random_and_check, waitForViewChange +from plenum.test.helper import vdr_send_random_and_check, waitForViewChange from plenum.test.node_catchup.helper import waitNodeDataEquality -from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node +from plenum.test.pool_transactions.helper import vdr_add_new_steward_and_node from plenum.test.spy_helpers import get_count from plenum.test.test_node import checkNodesConnected from stp_core.common.log import getlogger @@ -27,15 +27,15 @@ def backup_replicas_synced(nodes, last_ordered): def test_integration_setup_last_ordered_after_catchup(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_wallet_client, - sdk_pool_handle, tdir, + vdr_wallet_steward, + vdr_wallet_client, + vdr_pool_handle, tdir, tconf, allPluginsPath): start_view_no = txnPoolNodeSet[0].viewNo - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) - _, new_node = sdk_add_new_steward_and_node( - looper, sdk_pool_handle, sdk_wallet_steward, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) + _, new_node = vdr_add_new_steward_and_node( + looper, vdr_pool_handle, vdr_wallet_steward, 'EpsilonSteward', 'Epsilon', tdir, tconf, allPluginsPath=allPluginsPath) txnPoolNodeSet.append(new_node) @@ -43,8 +43,8 @@ def test_integration_setup_last_ordered_after_catchup(looper, txnPoolNodeSet, waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=start_view_no + 1) waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1], exclude_from_check=['check_last_ordered_3pc_backup']) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + 
vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) looper.run(eventually(backup_replicas_synced, txnPoolNodeSet, (start_view_no + 1, 2))) for node in txnPoolNodeSet: for replica in node.replicas.values(): diff --git a/plenum/test/node_request/test_split_non_3pc_messages_on_batches.py b/plenum/test/node_request/test_split_non_3pc_messages_on_batches.py index 2b3eb9e020..193117ee51 100644 --- a/plenum/test/node_request/test_split_non_3pc_messages_on_batches.py +++ b/plenum/test/node_request/test_split_non_3pc_messages_on_batches.py @@ -1,13 +1,13 @@ -from plenum.test.helper import sdk_send_random_requests, sdk_eval_timeout, \ - sdk_get_and_check_replies +from plenum.test.helper import vdr_send_random_requests, vdr_eval_timeout, \ + vdr_get_and_check_replies from stp_core.validators.message_length_validator import MessageLenValidator def test_msg_max_length_check_node_to_node(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_client2): + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_client2): """ Two clients send 2*N requests each at the same time. N < MSG_LEN_LIMIT but 2*N > MSG_LEN_LIMIT so the requests pass the max @@ -21,12 +21,12 @@ def test_msg_max_length_check_node_to_node(looper, patch_msg_len_validators(max_len_limit, txnPoolNodeSet) - reqs1 = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, N) - reqs2 = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client2, N) + reqs1 = vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, N) + reqs2 = vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client2, N) - total_timeout = sdk_eval_timeout(N, len(txnPoolNodeSet)) - sdk_get_and_check_replies(looper, reqs1, timeout=total_timeout) - sdk_get_and_check_replies(looper, reqs2, timeout=total_timeout) + total_timeout = vdr_eval_timeout(N, len(txnPoolNodeSet)) + vdr_get_and_check_replies(looper, reqs1, timeout=total_timeout) + vdr_get_and_check_replies(looper, reqs2, timeout=total_timeout) def patch_msg_len_validators(max_len_limit, txnPoolNodeSet): diff --git a/plenum/test/node_request/test_timestamp/test_3pc_timestamp.py b/plenum/test/node_request/test_timestamp/test_3pc_timestamp.py index 50fa9ee0cd..edcbb2b524 100644 --- a/plenum/test/node_request/test_timestamp/test_3pc_timestamp.py +++ b/plenum/test/node_request/test_timestamp/test_3pc_timestamp.py @@ -11,7 +11,7 @@ from plenum.test.test_node import getNonPrimaryReplicas from plenum.common.txn_util import get_txn_time, get_payload_data -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check @pytest.fixture(scope="module") @@ -22,16 +22,16 @@ def tconf(tconf): tconf.Max3PCBatchSize = oldMax3PCBatchSize -def test_replicas_prepare_time(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): +def test_replicas_prepare_time(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): last_domain_seq_no = txnPoolNodeSet[0].domainLedger.size + 1 # Check that each replica's PREPARE time is same as the PRE-PREPARE time sent_batches = 5 for i in range(sent_batches): - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, count=2) looper.runFor(1) @@ -65,15 +65,15 @@ def test_replicas_prepare_time(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wall def test_non_primary_accepts_pre_prepare_time(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): 
+ vdr_wallet_client, vdr_pool_handle): """ One of the non-primary replicas has an incorrect clock, so it thinks the PRE-PREPARE has an incorrect time """ - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, count=2) # send_reqs_to_nodes_and_verify_all_replies(looper, wallet1, client1, 2) # The replica having the bad clock @@ -84,10 +84,10 @@ def test_non_primary_accepts_pre_prepare_time(looper, txnPoolNodeSet, old_acceptable_rvs = getAllReturnVals( confused_npr._ordering_service, confused_npr._ordering_service._is_pre_prepare_time_acceptable) old_susp_count = get_timestamp_suspicion_count(confused_npr.node) - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, count=2) assert get_timestamp_suspicion_count(confused_npr.node) > old_susp_count diff --git a/plenum/test/node_request/test_timestamp/test_clock_disruption.py b/plenum/test/node_request/test_timestamp/test_clock_disruption.py index 4e3c616201..50c24958f5 100644 --- a/plenum/test/node_request/test_timestamp/test_clock_disruption.py +++ b/plenum/test/node_request/test_timestamp/test_clock_disruption.py @@ -8,7 +8,7 @@ from plenum.test.node_request.test_timestamp.helper import make_clock_faulty, \ get_timestamp_suspicion_count -from plenum.test.helper import sdk_send_random_and_check, sdk_send_random_request +from plenum.test.helper import vdr_send_random_and_check, vdr_send_random_request Max3PCBatchSize = 4 @@ -20,16 +20,16 @@ @pytest.mark.skip(reason='Pending implementation') def test_nodes_with_bad_clock(tconf, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): """ All nodes have bad clocks but they eventually get repaired; an example is nodes being cut off from the NTP server for some time, or NTP sync being disabled and then re-enabled without a node restart """ - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, count=Max3PCBatchSize * 3) ledger_sizes = {node.name: node.domainLedger.size for node in txnPoolNodeSet} @@ -45,7 +45,7 @@ def test_nodes_with_bad_clock(tconf, looper, txnPoolNodeSet, ppr_always_wrong=False) for _ in range(5): - sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) + vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_client) looper.runFor(.2) # Let some time pass @@ -71,8 +71,8 @@ def utc_epoch(self) -> int: looper.runFor(3) # All nodes reply - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, count=Max3PCBatchSize * 2) diff --git a/plenum/test/node_request/test_timestamp/test_timestamp_new_node.py b/plenum/test/node_request/test_timestamp/test_timestamp_new_node.py index 09e4976e84..3c5130f3b2 100644 --- a/plenum/test/node_request/test_timestamp/test_timestamp_new_node.py +++ b/plenum/test/node_request/test_timestamp/test_timestamp_new_node.py @@ -3,7 +3,7 @@ get_timestamp_suspicion_count from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change.helper import ensure_view_change -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check txnCount = 20 Max3PCBatchSize = 4 @@ -13,18 +13,18 @@ def
test_new_node_accepts_timestamp(tconf, looper, txnPoolNodeSet, - sdk_node_created_after_some_txns, - sdk_wallet_client, sdk_pool_handle): + vdr_node_created_after_some_txns, + vdr_wallet_client, vdr_pool_handle): """ A new node joins the pool and is able to function properly without """ - _, new_node, _, _ = sdk_node_created_after_some_txns + _, new_node, _, _ = vdr_node_created_after_some_txns old_susp_count = get_timestamp_suspicion_count(new_node) # Don't wait for node to catchup, start sending requests - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, count=10) waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1]) @@ -32,10 +32,10 @@ def test_new_node_accepts_timestamp(tconf, looper, txnPoolNodeSet, assert get_timestamp_suspicion_count(new_node) == old_susp_count # All nodes should reply - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, count=Max3PCBatchSize * 3) # No suspicions were raised by new_node assert get_timestamp_suspicion_count(new_node) == old_susp_count @@ -44,10 +44,10 @@ def test_new_node_accepts_timestamp(tconf, looper, txnPoolNodeSet, node) for node in txnPoolNodeSet} ensure_view_change(looper, txnPoolNodeSet) ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet) - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, count=Max3PCBatchSize * 3) for node in txnPoolNodeSet: assert suspicions[node.name] == get_timestamp_suspicion_count(node) diff --git a/plenum/test/node_request/test_timestamp/test_timestamp_post_view_change.py b/plenum/test/node_request/test_timestamp/test_timestamp_post_view_change.py index d3d6a004c5..f30b3354ae 100644 --- a/plenum/test/node_request/test_timestamp/test_timestamp_post_view_change.py +++ b/plenum/test/node_request/test_timestamp/test_timestamp_post_view_change.py @@ -6,7 +6,7 @@ get_timestamp_suspicion_count from plenum.test.test_node import ensureElectionsDone, getNonPrimaryReplicas from plenum.test.view_change.helper import ensure_view_change -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check Max3PCBatchSize = 4 @@ -17,7 +17,7 @@ def test_new_primary_has_wrong_clock(tconf, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): """ One of non-primary has a bad clock, it raises suspicions but orders requests after getting PREPAREs. 
Then a view change happens this @@ -61,8 +61,8 @@ def chk(): looper.run(eventually(chk, retryWait=1)) # All nodes reply - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, count=Max3PCBatchSize * 2) diff --git a/plenum/test/node_request/test_unit_setup_for_non_master.py b/plenum/test/node_request/test_unit_setup_for_non_master.py index 00c2a11e13..ccde2fe2f9 100644 --- a/plenum/test/node_request/test_unit_setup_for_non_master.py +++ b/plenum/test/node_request/test_unit_setup_for_non_master.py @@ -5,9 +5,9 @@ from plenum.common.messages.node_messages import PrePrepare, Prepare from plenum.common.util import compare_3PC_keys -from plenum.test.helper import sdk_send_random_and_check, init_discarded +from plenum.test.helper import vdr_send_random_and_check, init_discarded from plenum.test.node_catchup.helper import waitNodeDataEquality -from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node +from plenum.test.pool_transactions.helper import vdr_add_new_steward_and_node from plenum.test.test_node import getNonPrimaryReplicas, checkNodesConnected from stp_core.common.log import getlogger from stp_core.loop.eventually import eventually @@ -16,7 +16,7 @@ def test_setup_last_ordered_for_non_master_after_catchup(txnPoolNodeSet, - sdk_wallet_client): + vdr_wallet_client): inst_id = 1 replica = getNonPrimaryReplicas(txnPoolNodeSet, inst_id)[-1] replica._ordering_service.preparesWaitingForPrePrepare.clear() @@ -29,7 +29,7 @@ def test_setup_last_ordered_for_non_master_after_catchup(txnPoolNodeSet, replica.viewNo, ppSeqNo, timestamp, - sdk_wallet_client) + vdr_wallet_client) replica._ordering_service.prePreparesPendingPrevPP[replica.viewNo, ppSeqNo] = deque() replica._ordering_service.prePreparesPendingPrevPP[replica.viewNo, ppSeqNo] \ .append((preprepare, replica.primaryName)) @@ -43,7 +43,7 @@ def test_setup_last_ordered_for_non_master_after_catchup(txnPoolNodeSet, def test_setup_last_ordered_for_non_master_without_preprepare(txnPoolNodeSet, - sdk_wallet_client): + vdr_wallet_client): inst_id = 1 replica = getNonPrimaryReplicas(txnPoolNodeSet, inst_id)[-1] replica._ordering_service.preparesWaitingForPrePrepare.clear() @@ -56,7 +56,7 @@ def test_setup_last_ordered_for_non_master_without_preprepare(txnPoolNodeSet, replica.viewNo, ppSeqNo, timestamp, - sdk_wallet_client) + vdr_wallet_client) replica._ordering_service.preparesWaitingForPrePrepare[replica.viewNo, ppSeqNo] = deque() for node in txnPoolNodeSet: replica._ordering_service.preparesWaitingForPrePrepare[replica.viewNo, ppSeqNo] \ @@ -67,7 +67,7 @@ def test_setup_last_ordered_for_non_master_without_preprepare(txnPoolNodeSet, def test_setup_last_ordered_for_non_master_without_quorum_of_prepares( txnPoolNodeSet, - sdk_wallet_client): + vdr_wallet_client): inst_id = 1 replica = getNonPrimaryReplicas(txnPoolNodeSet, inst_id)[-1] replica._ordering_service.preparesWaitingForPrePrepare.clear() @@ -80,7 +80,7 @@ def test_setup_last_ordered_for_non_master_without_quorum_of_prepares( replica.viewNo, ppSeqNo, timestamp, - sdk_wallet_client) + vdr_wallet_client) replica._ordering_service.prePreparesPendingPrevPP[replica.viewNo, ppSeqNo] = deque() replica._ordering_service.prePreparesPendingPrevPP[replica.viewNo, ppSeqNo] \ .append((preprepare, replica.primaryName)) @@ -92,7 +92,7 @@ def test_setup_last_ordered_for_non_master_without_quorum_of_prepares( def test_setup_last_ordered_for_non_master_for_master(txnPoolNodeSet, - 
sdk_wallet_client): + vdr_wallet_client): inst_id = 0 replica = getNonPrimaryReplicas(txnPoolNodeSet, inst_id)[-1] replica._ordering_service.preparesWaitingForPrePrepare.clear() @@ -105,7 +105,7 @@ def test_setup_last_ordered_for_non_master_for_master(txnPoolNodeSet, replica.viewNo, ppSeqNo, timestamp, - sdk_wallet_client) + vdr_wallet_client) replica._ordering_service.prePreparesPendingPrevPP[replica.viewNo, ppSeqNo] = deque() replica._ordering_service.prePreparesPendingPrevPP[replica.viewNo, ppSeqNo] \ .append((preprepare, replica.primaryName)) @@ -118,7 +118,7 @@ def test_setup_last_ordered_for_non_master_for_master(txnPoolNodeSet, def test_setup_last_ordered_for_non_master_without_catchup(txnPoolNodeSet, - sdk_wallet_client): + vdr_wallet_client): inst_id = 1 last_ordered_3pc = (0, 12) timestamp = time.time() @@ -132,7 +132,7 @@ def test_setup_last_ordered_for_non_master_without_catchup(txnPoolNodeSet, replica.viewNo, ppSeqNo, timestamp, - sdk_wallet_client) + vdr_wallet_client) replica._ordering_service.prePreparesPendingPrevPP[replica.viewNo, ppSeqNo] = deque() replica._ordering_service.prePreparesPendingPrevPP[replica.viewNo, ppSeqNo] \ .append((preprepare, replica.primaryName)) diff --git a/plenum/test/observer/conftest.py b/plenum/test/observer/conftest.py index c90b0b5a63..279a8d6c42 100644 --- a/plenum/test/observer/conftest.py +++ b/plenum/test/observer/conftest.py @@ -5,7 +5,7 @@ from plenum.common.util import get_utc_epoch from plenum.server.observer.observable import Observable from plenum.server.observer.observer_sync_policy import ObserverSyncPolicyType -from plenum.test.helper import sdk_random_request_objects, generate_state_root +from plenum.test.helper import vdr_random_request_objects, generate_state_root from plenum.test.test_node import TestNode from plenum.test.testable import spyable @@ -60,7 +60,7 @@ def policy_each_reply(observable): @pytest.fixture() def fake_msg_batch_committed(): reqs = [req.as_dict for req in - sdk_random_request_objects(10, identifier="1" * 16, protocol_version=CURRENT_PROTOCOL_VERSION)] + vdr_random_request_objects(10, identifier="1" * 16, protocol_version=CURRENT_PROTOCOL_VERSION)] return BatchCommitted(reqs, DOMAIN_LEDGER_ID, 0, diff --git a/plenum/test/observer/test_observable_each_batch_node_integration.py b/plenum/test/observer/test_observable_each_batch_node_integration.py index 38b8be205d..0649d25677 100644 --- a/plenum/test/observer/test_observable_each_batch_node_integration.py +++ b/plenum/test/observer/test_observable_each_batch_node_integration.py @@ -1,15 +1,15 @@ from plenum.server.observer.observer_sync_policy import ObserverSyncPolicyType -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.spy_helpers import get_count def test_send_to_observers_each_reply_no_observers(node_observable, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): assert 0 == get_count(node_observable, node_observable.send_to_observers) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) assert 0 == get_count(node_observable, node_observable.send_to_observers) assert 0 == len(node_observable._outbox) @@ -18,11 +18,11 @@ def test_send_to_observers_each_reply_no_observers(node_observable, def test_send_to_observers_each_reply_with_observers(node_observable, looper, txnPoolNodeSet, - 
sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): node_observable.add_observer("observer1", ObserverSyncPolicyType.EACH_BATCH) assert 0 == get_count(node_observable, node_observable.send_to_observers) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) assert 1 == get_count(node_observable, node_observable.send_to_observers) assert 1 == len(node_observable._outbox) diff --git a/plenum/test/observer/test_observable_node_integration.py b/plenum/test/observer/test_observable_node_integration.py index d80f000581..ccffe63374 100644 --- a/plenum/test/observer/test_observable_node_integration.py +++ b/plenum/test/observer/test_observable_node_integration.py @@ -1,14 +1,14 @@ -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.spy_helpers import get_count def test_append_input(node_observable, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): assert 0 == get_count(node_observable, node_observable.append_input) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) assert 1 == get_count(node_observable, node_observable.append_input) @@ -16,9 +16,9 @@ def test_append_input(node_observable, def test_process_new_batch(node_observable, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): called_before = get_count(node_observable, node_observable.process_new_batch) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) assert called_before + 1 == get_count(node_observable, node_observable.process_new_batch) diff --git a/plenum/test/observer/test_observer_node_each_batch.py b/plenum/test/observer/test_observer_node_each_batch.py index aa72e3a163..f689b9983e 100644 --- a/plenum/test/observer/test_observer_node_each_batch.py +++ b/plenum/test/observer/test_observer_node_each_batch.py @@ -5,7 +5,7 @@ from plenum.common.util import randomString from plenum.server.observer.observer_node import NodeObserver from plenum.server.observer.observer_sync_policy import ObserverSyncPolicyType -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import checkNodeDataForEquality, checkNodeDataForInequality from plenum.test.pool_transactions.helper import new_node from stp_core.network.port_dispenser import genHa @@ -37,11 +37,11 @@ def fake_node(txnPoolNodeSet, @pytest.fixture(scope="module") def observed_data_msgs(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): txnPoolNodeSet[0]._observable.add_observer("observer1", ObserverSyncPolicyType.EACH_BATCH) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 10) msgs = [] diff --git a/plenum/test/observer/test_observer_node_integration.py b/plenum/test/observer/test_observer_node_integration.py index d7cfbea8b9..23f62e2c38 100644 --- a/plenum/test/observer/test_observer_node_integration.py +++ 
b/plenum/test/observer/test_observer_node_integration.py @@ -2,7 +2,7 @@ from plenum.common.messages.node_messages import PrePrepare, Prepare, Commit from plenum.server.observer.observer_sync_policy import ObserverSyncPolicyType -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import checkNodeDataForEquality from plenum.test.test_node import TestNode @@ -25,7 +25,7 @@ def do_nothing(msg, sender): def test_observer_node(txnPoolNodeSet, looper, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): ''' Integration tests checking the full workflow between a real Node Observer and real Node Observables. ''' @@ -39,8 +39,8 @@ def test_observer_node(txnPoolNodeSet, node.add_observer(observer_node.name, ObserverSyncPolicyType.EACH_BATCH) # send requests, so that they will be propagated to Observer (Delta) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 10) # check that Delta is in sync with other Nodes. diff --git a/plenum/test/observer/test_observer_policy_each_batch.py b/plenum/test/observer/test_observer_policy_each_batch.py index 7241071306..9ffa49075a 100644 --- a/plenum/test/observer/test_observer_policy_each_batch.py +++ b/plenum/test/observer/test_observer_policy_each_batch.py @@ -7,12 +7,12 @@ from plenum.common.messages.node_messages import BatchCommitted, ObservedData from plenum.common.util import get_utc_epoch from plenum.server.observer.observer_sync_policy_each_batch import ObserverSyncPolicyEachBatch -from plenum.test.helper import sdk_random_request_objects, generate_state_root +from plenum.test.helper import vdr_random_request_objects, generate_state_root def create_observed_data(seq_no_start=1, seq_no_end=5): req_num = seq_no_end - seq_no_start + 1 - reqs = [req.as_dict for req in sdk_random_request_objects( + reqs = [req.as_dict for req in vdr_random_request_objects( req_num, identifier="1" * 16, protocol_version=CURRENT_PROTOCOL_VERSION)] msg = BatchCommitted(reqs, DOMAIN_LEDGER_ID, diff --git a/plenum/test/plugin/demo_plugin/helper.py b/plenum/test/plugin/demo_plugin/helper.py index 08cd016295..4e3f5da3ee 100644 --- a/plenum/test/plugin/demo_plugin/helper.py +++ b/plenum/test/plugin/demo_plugin/helper.py @@ -2,7 +2,7 @@ from plenum.test.test_node import ensure_node_disconnected, TestNode, checkNodesConnected, ensureElectionsDone from plenum.common.constants import TXN_TYPE, DATA -from plenum.test.helper import sdk_gen_request, sdk_sign_and_submit_req_obj, sdk_get_reply, sdk_get_and_check_replies +from plenum.test.helper import vdr_gen_request, vdr_sign_and_submit_req_obj, vdr_get_reply, vdr_get_and_check_replies from plenum.test.plugin.demo_plugin.constants import AUCTION_START, GET_AUCTION @@ -25,10 +25,10 @@ def send_get_auction_txn(looper, def successful_op(looper, op, sdk_wallet, sdk_pool_handle): - req_obj = sdk_gen_request(op, identifier=sdk_wallet[1]) - req = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle, + req_obj = vdr_gen_request(op, identifier=sdk_wallet[1]) + req = vdr_sign_and_submit_req_obj(looper, sdk_pool_handle, sdk_wallet, req_obj) - return sdk_get_and_check_replies(looper, [req]) + return vdr_get_and_check_replies(looper, [req]) def restart_nodes(looper, nodeSet, restart_set, tconf, tdir, allPluginsPath, diff --git a/plenum/test/plugin/demo_plugin/test_catchup.py 
b/plenum/test/plugin/demo_plugin/test_catchup.py index 54a1824523..fc2bd408c1 100644 --- a/plenum/test/plugin/demo_plugin/test_catchup.py +++ b/plenum/test/plugin/demo_plugin/test_catchup.py @@ -15,13 +15,13 @@ def test_new_node_catchup_plugin_ledger(txn_pool_node_set_post_creation, looper, some_requests, - sdk_new_node_caught_up): + vdr_new_node_caught_up): """ A new node catches up the demo plugin's ledger too """ - assert len(sdk_new_node_caught_up.getLedger(AUCTION_LEDGER_ID)) > 0 + assert len(vdr_new_node_caught_up.getLedger(AUCTION_LEDGER_ID)) > 0 for node in txn_pool_node_set_post_creation[:-1]: - assert len(sdk_new_node_caught_up.getLedger(AUCTION_LEDGER_ID)) == \ + assert len(vdr_new_node_caught_up.getLedger(AUCTION_LEDGER_ID)) == \ len(node.getLedger(AUCTION_LEDGER_ID)) @@ -53,19 +53,19 @@ def some_demo_txns(looper, sdk_wallet_steward, sdk_pool_handle): @pytest.mark.skip(reason="INDY-1297. Node does not catch up on reconnection anymore.") def test_disconnected_node_catchup_plugin_ledger_txns(looper, txnPoolNodeSet, - sdk_wallet_client, - sdk_pool_handle, - sdk_new_node_caught_up): + vdr_wallet_client, + vdr_pool_handle, + vdr_new_node_caught_up): """ A node gets disconnected, a few config ledger txns happen, the disconnected node comes back up and catches up the config ledger """ - new_node = sdk_new_node_caught_up + new_node = vdr_new_node_caught_up disconnect_node_and_ensure_disconnected( looper, txnPoolNodeSet, new_node, stopNode=False) # Do some demo txns; - some_demo_txns(looper, sdk_wallet_client, sdk_pool_handle) + some_demo_txns(looper, vdr_wallet_client, vdr_pool_handle) # Make sure new node got out of sync waitNodeDataInequality(looper, new_node, *txnPoolNodeSet[:-1]) diff --git a/plenum/test/plugin/demo_plugin/test_freshness.py b/plenum/test/plugin/demo_plugin/test_freshness.py index 98a5407856..b14ab6d17c 100644 --- a/plenum/test/plugin/demo_plugin/test_freshness.py +++ b/plenum/test/plugin/demo_plugin/test_freshness.py @@ -17,9 +17,9 @@ def tconf(tconf): def test_update_bls_multi_sig_for_auction_ledger_by_timeout(looper, tconf, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward): + vdr_pool_handle, vdr_wallet_steward): # 1. Update auction ledger - send_auction_txn(looper, sdk_pool_handle, sdk_wallet_steward) + send_auction_txn(looper, vdr_pool_handle, vdr_wallet_steward) # 2. 
Wait for the first freshness update looper.run(eventually( diff --git a/plenum/test/plugin/demo_plugin/test_freshness_during_ordering.py b/plenum/test/plugin/demo_plugin/test_freshness_during_ordering.py index 76cf0f782c..a5382688c4 100644 --- a/plenum/test/plugin/demo_plugin/test_freshness_during_ordering.py +++ b/plenum/test/plugin/demo_plugin/test_freshness_during_ordering.py @@ -16,15 +16,15 @@ def tconf(tconf): def test_update_bls_multi_sig_when_auction_ledger_orders(looper, tconf, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_steward): # Update auction ledger so that its state root is different from config ledger for node in txnPoolNodeSet: node.states[AUCTION_LEDGER_ID].set(b'some_key', b'some_value') def send_txn(): send_auction_txn(looper, - sdk_pool_handle, sdk_wallet_steward) + vdr_pool_handle, vdr_wallet_steward) check_update_bls_multi_sig_during_ordering(looper, txnPoolNodeSet, send_txn, diff --git a/plenum/test/plugin/demo_plugin/test_frozen_ledgers.py b/plenum/test/plugin/demo_plugin/test_frozen_ledgers.py index 1589627ec0..65b369c7a6 100644 --- a/plenum/test/plugin/demo_plugin/test_frozen_ledgers.py +++ b/plenum/test/plugin/demo_plugin/test_frozen_ledgers.py @@ -19,24 +19,24 @@ def tconf(tconf): yield tconf -def test_send_freeze_ledgers(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_trustee): +def test_send_freeze_ledgers(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_trustee): ledger_to_remove = AUCTION_LEDGER_ID # check that the config state doesn't contain frozen ledgers records - result = sdk_get_frozen_ledgers(looper, sdk_pool_handle, - sdk_wallet_trustee)[1]["result"][DATA] + result = sdk_get_frozen_ledgers(looper, vdr_pool_handle, + vdr_wallet_trustee)[1]["result"][DATA] assert result is None # add to the config state a frozen ledgers record with an empty list sdk_send_freeze_ledgers( - looper, sdk_pool_handle, - [sdk_wallet_trustee], + looper, vdr_pool_handle, + [vdr_wallet_trustee], [] ) # check that the config state contains a frozen ledgers record with an empty list - result = sdk_get_frozen_ledgers(looper, sdk_pool_handle, - sdk_wallet_trustee)[1]["result"][DATA] + result = sdk_get_frozen_ledgers(looper, vdr_pool_handle, + vdr_wallet_trustee)[1]["result"][DATA] assert len(result) == 0 # add to the config state a frozen ledgers record with AUCTION ledger @@ -45,14 +45,14 @@ def test_send_freeze_ledgers(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet timeout=3 * FRESHNESS_TIMEOUT) ) sdk_send_freeze_ledgers( - looper, sdk_pool_handle, - [sdk_wallet_trustee], + looper, vdr_pool_handle, + [vdr_wallet_trustee], [ledger_to_remove] ) # check that the config state contains a frozen ledgers record with AUCTION ledger - result = sdk_get_frozen_ledgers(looper, sdk_pool_handle, - sdk_wallet_trustee)[1]["result"][DATA] + result = sdk_get_frozen_ledgers(looper, vdr_pool_handle, + vdr_wallet_trustee)[1]["result"][DATA] assert len(result) == 1 assert result[str(ledger_to_remove)]["state"] assert result[str(ledger_to_remove)]["ledger"] @@ -60,14 +60,14 @@ def test_send_freeze_ledgers(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet # add to the config state a frozen ledgers record with an empty list sdk_send_freeze_ledgers( - looper, sdk_pool_handle, - [sdk_wallet_trustee], + looper, vdr_pool_handle, + [vdr_wallet_trustee], [] ) # check that the frozen ledgers list from the state wasn't cleared by the transaction with empty ledgers' list - result = sdk_get_frozen_ledgers(looper, sdk_pool_handle, - 
sdk_wallet_trustee)[1]["result"][DATA] + result = sdk_get_frozen_ledgers(looper, vdr_pool_handle, + vdr_wallet_trustee)[1]["result"][DATA] assert len(result) == 1 assert result[str(ledger_to_remove)]["state"] assert result[str(ledger_to_remove)]["ledger"] diff --git a/plenum/test/plugin/demo_plugin/test_plugin_basic.py b/plenum/test/plugin/demo_plugin/test_plugin_basic.py index 0d2e3be2f7..737cc39970 100644 --- a/plenum/test/plugin/demo_plugin/test_plugin_basic.py +++ b/plenum/test/plugin/demo_plugin/test_plugin_basic.py @@ -1,9 +1,9 @@ import pytest from plenum.common.exceptions import RequestNackedException -from plenum.test.helper import sdk_gen_request, \ - sdk_sign_and_submit_req_obj, sdk_get_reply, sdk_sign_request_objects, sdk_send_signed_requests, \ - sdk_get_and_check_replies +from plenum.test.helper import vdr_gen_request, \ + vdr_sign_and_submit_req_obj, vdr_get_reply, vdr_sign_request_objects, vdr_send_signed_requests, \ + vdr_get_and_check_replies from plenum.common.constants import TXN_TYPE, DATA from plenum.common.util import randomString from plenum.test.plugin.demo_plugin import AUCTION_LEDGER_ID, dummy_field_length @@ -22,7 +22,7 @@ def test_plugin_setup(txn_pool_node_set_post_creation): def test_plugin_client_req_fields(txn_pool_node_set_post_creation, looper, - sdk_wallet_steward, sdk_pool_handle): + vdr_wallet_steward, vdr_pool_handle): """ Test that plugin's addition of request fields and their validation is successful @@ -33,18 +33,18 @@ def test_plugin_client_req_fields(txn_pool_node_set_post_creation, looper, } # Valid field value results in successful processing - req_obj = sdk_gen_request(op, identifier=sdk_wallet_steward[1], + req_obj = vdr_gen_request(op, identifier=vdr_wallet_steward[1], fix_length_dummy=randomString(dummy_field_length)) - req = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle, sdk_wallet_steward, + req = vdr_sign_and_submit_req_obj(looper, vdr_pool_handle, vdr_wallet_steward, req_obj) - sdk_get_reply(looper, req) + vdr_get_reply(looper, req) # Invalid field value results in proper failure - _, did = sdk_wallet_steward - req = sdk_gen_request(op, identifier=did, fix_length_dummy=randomString(dummy_field_length + 1)) - reqs = sdk_sign_request_objects(looper, sdk_wallet_steward, [req]) - reqs = sdk_send_signed_requests(sdk_pool_handle, reqs) + _, did = vdr_wallet_steward + req = vdr_gen_request(op, identifier=did, fix_length_dummy=randomString(dummy_field_length + 1)) + reqs = vdr_sign_request_objects(looper, vdr_wallet_steward, [req]) + reqs = vdr_send_signed_requests(vdr_pool_handle, reqs, looper) with pytest.raises(RequestNackedException) as e: - sdk_get_and_check_replies(looper, reqs) + vdr_get_and_check_replies(looper, reqs) assert 'should have length' in e._excinfo[1].args[0] diff --git a/plenum/test/plugin/demo_plugin/test_plugin_removing.py b/plenum/test/plugin/demo_plugin/test_plugin_removing.py index de88023a26..3cd441c574 100644 --- a/plenum/test/plugin/demo_plugin/test_plugin_removing.py +++ b/plenum/test/plugin/demo_plugin/test_plugin_removing.py @@ -5,15 +5,15 @@ from plenum.test.freeze_ledgers.helper import sdk_send_freeze_ledgers from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.plugin.demo_plugin.constants import GET_AUCTION, AUCTION_START from plenum.test.test_node import ensureElectionsDone -from plenum.test.pool_transactions.helper import 
sdk_build_get_txn_request, sdk_sign_and_send_prepared_request +from plenum.test.pool_transactions.helper import vdr_build_get_txn_request, vdr_sign_and_send_prepared_request from plenum.common.txn_util import get_seq_no, get_payload_data from plenum.test.freshness.helper import check_freshness_updated_for_ledger -from plenum.test.helper import freshness, sdk_get_and_check_replies, sdk_send_random_and_check +from plenum.test.helper import freshness, vdr_get_and_check_replies, vdr_send_random_and_check from plenum.test.plugin.demo_plugin import AUCTION_LEDGER_ID from plenum.test.plugin.demo_plugin.helper import send_auction_txn, send_get_auction_txn, restart_nodes from stp_core.loop.eventually import eventually @@ -34,14 +34,14 @@ def check_get_auction_txn(expected_result, seqNo = get_seq_no(expected_result) _, steward_did = sdk_wallet_steward - request = sdk_build_get_txn_request(looper, steward_did, seqNo, ledger_type=str(AUCTION_LEDGER_ID)) + request = vdr_build_get_txn_request(looper, steward_did, seqNo, ledger_type=str(AUCTION_LEDGER_ID)) request_couple = \ - sdk_sign_and_send_prepared_request(looper, + vdr_sign_and_send_prepared_request(looper, sdk_wallet_steward, sdk_pool_handle, request) - result = sdk_get_and_check_replies(looper, + result = vdr_get_and_check_replies(looper, [request_couple])[0][1]['result'] assert expected_result['reqSignature'] == result['data']['reqSignature'] @@ -53,7 +53,7 @@ def check_get_auction_txn(expected_result, def test_plugin_removing(looper, tconf, txn_pool_node_set_post_creation, - sdk_pool_handle, sdk_wallet_steward, sdk_wallet_trustee, tdir, allPluginsPath): + vdr_pool_handle, vdr_wallet_steward, vdr_wallet_trustee, tdir, allPluginsPath): """ Send a transaction from the plugin Wait for recording a freshness txn @@ -71,11 +71,11 @@ def test_plugin_removing(looper, tconf, txn_pool_node_set_post_creation, txnPoolNodeSet = txn_pool_node_set_post_creation # Update auction ledger - result = send_auction_txn(looper, sdk_pool_handle, sdk_wallet_steward)[0][1]["result"] + result = send_auction_txn(looper, vdr_pool_handle, vdr_wallet_steward)[0][1]["result"] # get txn auction_id, auction_name = list(get_payload_data(result)[DATA].items())[0] - get_auction_result = send_get_auction_txn(looper, sdk_pool_handle, sdk_wallet_steward) + get_auction_result = send_get_auction_txn(looper, vdr_pool_handle, vdr_wallet_steward) assert get_auction_result[0][1]["result"][auction_id] == auction_name # Wait for the first freshness update @@ -85,18 +85,18 @@ def test_plugin_removing(looper, tconf, txn_pool_node_set_post_creation, ) sdk_send_freeze_ledgers( - looper, sdk_pool_handle, - [sdk_wallet_trustee], + looper, vdr_pool_handle, + [vdr_wallet_trustee], [AUCTION_LEDGER_ID] ) with pytest.raises(RequestRejectedException, match="'{}' transaction is forbidden because of " "'{}' ledger is frozen".format(AUCTION_START, AUCTION_LEDGER_ID)): - send_auction_txn(looper, sdk_pool_handle, sdk_wallet_steward) + send_auction_txn(looper, vdr_pool_handle, vdr_wallet_steward) # should fail with "ledger is frozen" - get_auction_result = send_get_auction_txn(looper, sdk_pool_handle, sdk_wallet_steward) + get_auction_result = send_get_auction_txn(looper, vdr_pool_handle, vdr_wallet_steward) assert get_auction_result[0][1]["result"][auction_id] == auction_name # restart pool @@ -106,21 +106,21 @@ def test_plugin_removing(looper, tconf, txn_pool_node_set_post_creation, with pytest.raises(RequestNackedException, match="unknown value '" + str(AUCTION_LEDGER_ID)): check_get_auction_txn(result, 
looper, - sdk_wallet_steward, - sdk_pool_handle) + vdr_wallet_steward, + vdr_pool_handle) # should fail with "unknown txn" with pytest.raises(RequestNackedException, match="invalid type: " + GET_AUCTION): - send_get_auction_txn(looper, sdk_pool_handle, sdk_wallet_steward) + send_get_auction_txn(looper, vdr_pool_handle, vdr_wallet_steward) with pytest.raises(RequestNackedException, match="invalid type: " + AUCTION_START): - send_auction_txn(looper, sdk_pool_handle, sdk_wallet_steward) + send_auction_txn(looper, vdr_pool_handle, vdr_wallet_steward) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, 1) # make sure that all nodes have equal primaries and can order ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=20) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) diff --git a/plenum/test/plugin/demo_plugin/test_plugin_request_handling.py b/plenum/test/plugin/demo_plugin/test_plugin_request_handling.py index 1ba4695d85..d630262ef6 100644 --- a/plenum/test/plugin/demo_plugin/test_plugin_request_handling.py +++ b/plenum/test/plugin/demo_plugin/test_plugin_request_handling.py @@ -3,8 +3,8 @@ from plenum.common.constants import TXN_TYPE, DATA from plenum.common.exceptions import CommonSdkIOException from plenum.test.plugin.demo_plugin.helper import successful_op -from plenum.test.helper import sdk_send_signed_requests, \ - sdk_sign_request_strings, sdk_get_and_check_replies +from plenum.test.helper import vdr_send_signed_requests, \ + vdr_sign_request_strings, vdr_get_and_check_replies from plenum.test.plugin.demo_plugin.constants import AMOUNT, PLACE_BID, \ AUCTION_START, AUCTION_END from stp_core.loop.eventually import eventually @@ -13,27 +13,27 @@ def test_plugin_static_validation(txn_pool_node_set_post_creation, looper, - sdk_wallet_steward, sdk_pool_handle): + vdr_wallet_steward, vdr_pool_handle): """ Check plugin static validation fails and passes """ op = { TXN_TYPE: AUCTION_START } - reqs = sdk_sign_request_strings(looper, sdk_wallet_steward, [op, ]) - reqs = sdk_send_signed_requests(sdk_pool_handle, reqs) + reqs = vdr_sign_request_strings(looper, vdr_wallet_steward, [op, ]) + reqs = vdr_send_signed_requests(vdr_pool_handle, reqs, looper) with pytest.raises(CommonSdkIOException) as exc_info: - sdk_get_and_check_replies(looper, reqs) + vdr_get_and_check_replies(looper, reqs) exc_info.match('Got an error with code 113') op = { TXN_TYPE: AUCTION_START, DATA: 'should be a dict but giving a string' } - reqs = sdk_sign_request_strings(looper, sdk_wallet_steward, [op, ]) - reqs = sdk_send_signed_requests(sdk_pool_handle, reqs) + reqs = vdr_sign_request_strings(looper, vdr_wallet_steward, [op, ]) + reqs = vdr_send_signed_requests(vdr_pool_handle, reqs, looper) with pytest.raises(CommonSdkIOException) as exc_info: - sdk_get_and_check_replies(looper, reqs) + vdr_get_and_check_replies(looper, reqs) exc_info.match('Got an error with code 113') op = { @@ -41,27 +41,27 @@ def test_plugin_static_validation(txn_pool_node_set_post_creation, looper, DATA: {'id': 'abc'} } - successful_op(looper, op, sdk_wallet_steward, sdk_pool_handle) + successful_op(looper, op, vdr_wallet_steward, vdr_pool_handle) op = { TXN_TYPE: PLACE_BID, DATA: {'id': 'abc', AMOUNT: -3} } - reqs 
= sdk_sign_request_strings(looper, sdk_wallet_steward, [op, ]) - reqs = sdk_send_signed_requests(sdk_pool_handle, reqs) + reqs = vdr_sign_request_strings(looper, vdr_wallet_steward, [op, ]) + reqs = vdr_send_signed_requests(vdr_pool_handle, reqs, looper) with pytest.raises(CommonSdkIOException) as exc_info: - sdk_get_and_check_replies(looper, reqs) + vdr_get_and_check_replies(looper, reqs) exc_info.match('Got an error with code 113') op = { TXN_TYPE: PLACE_BID, DATA: {'id': 'abc', AMOUNT: 20} } - successful_op(looper, op, sdk_wallet_steward, sdk_pool_handle) + successful_op(looper, op, vdr_wallet_steward, vdr_pool_handle) def test_plugin_dynamic_validation(txn_pool_node_set_post_creation, looper, - sdk_wallet_steward, sdk_pool_handle): + vdr_wallet_steward, vdr_pool_handle): """ Check plugin dynamic validation fails and passes """ @@ -69,28 +69,28 @@ def test_plugin_dynamic_validation(txn_pool_node_set_post_creation, looper, TXN_TYPE: AUCTION_END, DATA: {'id': 'abcdef'} } - reqs = sdk_sign_request_strings(looper, sdk_wallet_steward, [op, ]) - reqs = sdk_send_signed_requests(sdk_pool_handle, reqs) + reqs = vdr_sign_request_strings(looper, vdr_wallet_steward, [op, ]) + reqs = vdr_send_signed_requests(vdr_pool_handle, reqs, looper) with pytest.raises(CommonSdkIOException) as exc_info: - sdk_get_and_check_replies(looper, reqs) + vdr_get_and_check_replies(looper, reqs) exc_info.match('Got an error with code 113') op = { TXN_TYPE: AUCTION_START, DATA: {'id': 'xyz'} } - successful_op(looper, op, sdk_wallet_steward, sdk_pool_handle) + successful_op(looper, op, vdr_wallet_steward, vdr_pool_handle) op = { TXN_TYPE: AUCTION_END, DATA: {'id': 'xyz'} } - successful_op(looper, op, sdk_wallet_steward, sdk_pool_handle) + successful_op(looper, op, vdr_wallet_steward, vdr_pool_handle) @pytest.fixture(scope="module") def some_requests(txn_pool_node_set_post_creation, looper, - sdk_wallet_steward, sdk_pool_handle): + vdr_wallet_steward, vdr_pool_handle): def check_auctions_amount(auc, expected_amount): assert did in auc['pqr'] assert auc['pqr'][did] == expected_amount @@ -106,15 +106,15 @@ def check_auctions_amount(auc, expected_amount): TXN_TYPE: AUCTION_START, DATA: {'id': 'pqr'} } - successful_op(looper, op, sdk_wallet_steward, sdk_pool_handle) + successful_op(looper, op, vdr_wallet_steward, vdr_pool_handle) op = { TXN_TYPE: PLACE_BID, DATA: {'id': 'pqr', AMOUNT: 20} } - successful_op(looper, op, sdk_wallet_steward, sdk_pool_handle) + successful_op(looper, op, vdr_wallet_steward, vdr_pool_handle) - _, did = sdk_wallet_steward + _, did = vdr_wallet_steward for node in txn_pool_node_set_post_creation: print(node.name) auctions = node.write_manager.request_handlers[PLACE_BID][0].auctions @@ -125,7 +125,7 @@ def check_auctions_amount(auc, expected_amount): TXN_TYPE: PLACE_BID, DATA: {'id': 'pqr', AMOUNT: 40} } - successful_op(looper, op, sdk_wallet_steward, sdk_pool_handle) + successful_op(looper, op, vdr_wallet_steward, vdr_pool_handle) for node in txn_pool_node_set_post_creation: auctions = node.write_manager.request_handlers[PLACE_BID][0].auctions @@ -136,7 +136,7 @@ def check_auctions_amount(auc, expected_amount): TXN_TYPE: AUCTION_END, DATA: {'id': 'pqr'} } - successful_op(looper, op, sdk_wallet_steward, sdk_pool_handle) + successful_op(looper, op, vdr_wallet_steward, vdr_pool_handle) for node in txn_pool_node_set_post_creation: # Not all batches might have BLS-sig but at least one of them will have assert node.bls_bft.bls_store._kvs.size > old_bls_store_size diff --git 
a/plenum/test/plugin/demo_plugin/test_request_digest.py b/plenum/test/plugin/demo_plugin/test_request_digest.py index 9ab847dce5..8bbee0cda2 100644 --- a/plenum/test/plugin/demo_plugin/test_request_digest.py +++ b/plenum/test/plugin/demo_plugin/test_request_digest.py @@ -9,22 +9,22 @@ from plenum.common.request import Request from plenum.common.util import randomString -from plenum.test.helper import sdk_random_request_objects, sdk_multisign_request_object, sdk_get_and_check_replies, \ - sdk_send_signed_requests, sdk_gen_request, sdk_sign_and_submit_req_obj, sdk_get_reply +from plenum.test.helper import vdr_random_request_objects, vdr_multisign_request_object, vdr_get_and_check_replies, \ + vdr_send_signed_requests, vdr_gen_request, vdr_sign_and_submit_req_obj, vdr_get_reply from plenum.test.plugin.demo_plugin import dummy_field_length from plenum.test.plugin.demo_plugin.constants import PLACE_BID, AMOUNT, AUCTION_START @pytest.fixture(scope='function') -def two_requests(looper, sdk_wallet_steward): - wh, did = sdk_wallet_steward +def two_requests(looper, vdr_wallet_steward): + wh, did = vdr_wallet_steward op = { TXN_TYPE: AUCTION_START, DATA: {'id': 'xyz'} } - req1 = sdk_gen_request(op, protocol_version=CURRENT_PROTOCOL_VERSION, + req1 = vdr_gen_request(op, protocol_version=CURRENT_PROTOCOL_VERSION, identifier=did).as_dict field = list(PLUGIN_CLIENT_REQUEST_FIELDS.keys())[0] req1[field] = 'x' * 10 @@ -32,10 +32,10 @@ def two_requests(looper, sdk_wallet_steward): req2 = copy.deepcopy(req1) req2[field] = 'z' * 10 - req1 = sdk_multisign_request_object(looper, sdk_wallet_steward, json.dumps(req1)) + req1 = vdr_multisign_request_object(looper, vdr_wallet_steward, json.dumps(req1)) req_obj1 = Request(**json.loads(req1)) - req2 = sdk_multisign_request_object(looper, sdk_wallet_steward, json.dumps(req2)) + req2 = vdr_multisign_request_object(looper, vdr_wallet_steward, json.dumps(req2)) req_obj2 = Request(**json.loads(req2)) assert req_obj1.payload_digest == req_obj2.payload_digest @@ -44,18 +44,18 @@ def two_requests(looper, sdk_wallet_steward): def test_plugin_digest_match_to_written(txn_pool_node_set_post_creation, looper, - sdk_wallet_steward, sdk_pool_handle): + vdr_wallet_steward, vdr_pool_handle): op = { TXN_TYPE: AUCTION_START, DATA: {'id': 'xyz'} } # Valid field value results in successful processing - req_obj = sdk_gen_request(op, identifier=sdk_wallet_steward[1], + req_obj = vdr_gen_request(op, identifier=vdr_wallet_steward[1], fix_length_dummy=randomString(dummy_field_length)) - req = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle, sdk_wallet_steward, + req = vdr_sign_and_submit_req_obj(looper, vdr_pool_handle, vdr_wallet_steward, req_obj) - sdk_get_and_check_replies(looper, [req]) + vdr_get_and_check_replies(looper, [req]) req = Request(**req[0]) value = txn_pool_node_set_post_creation[0].seqNoDB.get_by_full_digest(req.digest) @@ -66,13 +66,13 @@ def test_plugin_digest_match_to_written(txn_pool_node_set_post_creation, looper, def test_send_same_txn_with_different_plugins( - looper, txn_pool_node_set_post_creation, sdk_pool_handle, two_requests): + looper, txn_pool_node_set_post_creation, vdr_pool_handle, two_requests): req1, req2 = two_requests - rep1 = sdk_send_signed_requests(sdk_pool_handle, [req1]) - sdk_get_and_check_replies(looper, rep1) + rep1 = vdr_send_signed_requests(vdr_pool_handle, [req1], looper) + vdr_get_and_check_replies(looper, rep1) - rep2 = sdk_send_signed_requests(sdk_pool_handle, [req2]) + rep2 = vdr_send_signed_requests(vdr_pool_handle, [req2], looper) with 
pytest.raises(RequestNackedException) as e: - sdk_get_and_check_replies(looper, rep2) + vdr_get_and_check_replies(looper, rep2) e.match('Same txn was already ordered with different signatures or pluggable fields') diff --git a/plenum/test/pool_transactions/conftest.py b/plenum/test/pool_transactions/conftest.py index 297b4ed962..03dbf32a03 100644 --- a/plenum/test/pool_transactions/conftest.py +++ b/plenum/test/pool_transactions/conftest.py @@ -5,7 +5,7 @@ from plenum.common.util import randomString from plenum.test.test_node import checkNodesConnected, TestNode from plenum.test.pool_transactions.helper import \ - sdk_add_new_steward_and_node, sdk_pool_refresh + vdr_add_new_steward_and_node, vdr_pool_refresh @@ -31,7 +31,7 @@ def sdk_node_theta_added(looper, new_steward_name = "testClientSteward" + randomString(3) new_node_name = name or "Theta" new_steward_wallet, new_node = \ - sdk_add_new_steward_and_node(looper, + vdr_add_new_steward_and_node(looper, sdk_pool_handle, sdk_wallet_steward, new_steward_name, @@ -42,7 +42,7 @@ def sdk_node_theta_added(looper, nodeClass=testNodeClass) txnPoolNodeSet.append(new_node) looper.run(checkNodesConnected(txnPoolNodeSet)) - sdk_pool_refresh(looper, sdk_pool_handle) + vdr_pool_refresh(looper, sdk_pool_handle) return new_steward_wallet, new_node @@ -51,12 +51,12 @@ def sdk_node_theta_added_fixture(looper, txnPoolNodeSet, tdir, tconf, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, allPluginsPath, testNodeClass=TestNode, name=None): - return sdk_node_theta_added(looper, txnPoolNodeSet, tdir, tconf, sdk_pool_handle, sdk_wallet_steward, + return sdk_node_theta_added(looper, txnPoolNodeSet, tdir, tconf, vdr_pool_handle, vdr_wallet_steward, allPluginsPath, testNodeClass, name) diff --git a/plenum/test/pool_transactions/helper.py b/plenum/test/pool_transactions/helper.py index decf030033..80a67bbecb 100644 --- a/plenum/test/pool_transactions/helper.py +++ b/plenum/test/pool_transactions/helper.py @@ -1,9 +1,14 @@ import json -from indy.did import create_and_store_my_did -from indy.ledger import build_node_request, build_nym_request, \ +from indy.did import create_and_store_my_did as sdk_create_and_store_my_did +from indy.ledger import build_node_request as sdk_build_node_request +from indy.ledger import build_nym_request as sdk_build_nym_request +from indy.ledger import build_get_txn_request as build_get_sdk_txn_request +from indy.pool import refresh_pool_ledger as sdk_refresh_pool_ledger + +from plenum.test.wallet_helper import vdr_create_and_store_did +from indy_vdr.ledger import build_node_request, build_nym_request, \ build_get_txn_request -from indy.pool import refresh_pool_ledger from plenum.test.node_catchup.helper import waitNodeDataEquality from stp_core.loop.looper import Looper @@ -14,11 +19,13 @@ from plenum.common.keygen_utils import initNodeKeysForBothStacks from plenum.common.signer_simple import SimpleSigner from plenum.common.util import randomString, hexToFriendly -from plenum.test.helper import sdk_sign_request_objects, \ +from plenum.test.helper import vdr_sign_request_objects, \ + vdr_send_signed_requests, vdr_json_to_request_object, \ + vdr_get_and_check_replies, vdr_sign_request_strings, sdk_sign_request_objects, \ sdk_send_signed_requests, sdk_json_to_request_object, \ sdk_get_and_check_replies, sdk_sign_request_strings -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional, sdk_ensure_pool_functional from 
plenum.test.test_node import TestNode, \ ensure_node_disconnected, checkNodesConnected from stp_core.network.port_dispenser import genHa @@ -30,16 +37,16 @@ REFRESH_TRY_COUNT = 4 -def new_client_request(role, name, looper, sdk_wallet): - wh, did = sdk_wallet +def vdr_new_client_request(role, name, looper, vdr_wallet): + wh, did = vdr_wallet seed = randomString(32) (named_did, named_verkey) = looper.loop.run_until_complete( - create_and_store_my_did(wh, json.dumps({'seed': seed}))) + vdr_create_and_store_did(wh, seed)) nym_request = looper.loop.run_until_complete( build_nym_request(did, named_did, named_verkey, name, role)) - return sdk_sign_request_strings(looper, sdk_wallet, + return vdr_sign_request_strings(looper, vdr_wallet, [json.loads(nym_request)])[0] @@ -75,8 +82,8 @@ def add_started_node(looper, node_ha, client_ha, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, bls_key, key_proof): ''' @@ -84,14 +91,14 @@ def add_started_node(looper, that is sends NODE txn. Makes sure that node is actually added and connected to all otehr nodes. ''' - new_steward_wallet_handle = sdk_add_new_nym(looper, sdk_pool_handle, - sdk_wallet_steward, + new_steward_wallet_handle = vdr_add_new_nym(looper, vdr_pool_handle, + vdr_wallet_steward, "Steward" + new_node.name, role=STEWARD_STRING) node_name = new_node.name node_dest = hexToFriendly(new_node.nodestack.verhex) - sdk_send_update_node(looper, new_steward_wallet_handle, - sdk_pool_handle, node_dest, node_name, + vdr_send_update_node(looper, new_steward_wallet_handle, + vdr_pool_handle, node_dest, node_name, node_ha[0], node_ha[1], client_ha[0], client_ha[1], services=[VALIDATOR], @@ -100,10 +107,10 @@ def add_started_node(looper, txnPoolNodeSet.append(new_node) looper.run(checkNodesConnected(txnPoolNodeSet)) - sdk_pool_refresh(looper, sdk_pool_handle) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle) + vdr_pool_refresh(looper, vdr_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, + vdr_wallet_steward, + vdr_pool_handle) waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1]) @@ -154,9 +161,9 @@ def new_node( return node -def sdk_add_new_steward_and_node(looper, - sdk_pool_handle, - sdk_wallet_steward, +def vdr_add_new_steward_and_node(looper, + vdr_pool_handle, + vdr_wallet_steward, new_steward_name, new_node_name, tdir, @@ -168,14 +175,14 @@ def sdk_add_new_steward_and_node(looper, do_post_node_creation: Callable = None, services=[VALIDATOR], wait_till_added=True): - new_steward_wallet_handle = sdk_add_new_nym(looper, - sdk_pool_handle, - sdk_wallet_steward, + new_steward_wallet_handle = vdr_add_new_nym(looper, + vdr_pool_handle, + vdr_wallet_steward, alias=new_steward_name, role=STEWARD_STRING) - new_node = sdk_add_new_node( + new_node = vdr_add_new_node( looper, - sdk_pool_handle, + vdr_pool_handle, new_steward_wallet_handle, new_node_name, tdir, @@ -189,7 +196,7 @@ def sdk_add_new_steward_and_node(looper, return new_steward_wallet_handle, new_node -def sdk_add_new_nym(looper, sdk_pool_handle, creators_wallet, +def vdr_add_new_nym(looper, vdr_pool_handle, creators_wallet, alias=None, role=None, seed=None, dest=None, verkey=None, skipverkey=False, no_wait=False): seed = seed or randomString(32) @@ -198,22 +205,21 @@ def sdk_add_new_nym(looper, sdk_pool_handle, creators_wallet, # filling nym request and getting steward did # if role == None, we are adding client - nym_request, new_did = looper.loop.run_until_complete( - 
prepare_nym_request(creators_wallet, seed, + nym_request, new_did = looper.loop.run_until_complete(vdr_prepare_nym_request(creators_wallet, seed, alias, role, dest, verkey, skipverkey)) - # sending request using 'sdk_' functions - request_couple = sdk_sign_and_send_prepared_request(looper, creators_wallet, - sdk_pool_handle, nym_request) + # sending request using 'vdr_' functions + request_couple = vdr_sign_and_send_prepared_request(looper, creators_wallet, + vdr_pool_handle, nym_request) if no_wait: return request_couple # waitng for replies - sdk_get_and_check_replies(looper, [request_couple]) + vdr_get_and_check_replies(looper, [request_couple]) return wh, new_did -def sdk_add_new_node(looper, - sdk_pool_handle, +def vdr_add_new_node(looper, + vdr_pool_handle, steward_wallet_handle, new_node_name, tdir, tconf, @@ -227,8 +233,7 @@ def sdk_add_new_node(looper, # filling node request _, steward_did = steward_wallet_handle - node_request = looper.loop.run_until_complete( - prepare_node_request(steward_did, + node_request = vdr_prepare_node_request(steward_did, new_node_name=new_node_name, clientIp=clientIp, clientPort=clientPort, @@ -237,15 +242,15 @@ def sdk_add_new_node(looper, bls_key=bls_key, sigseed=sigseed, services=services, - key_proof=key_proof)) + key_proof=key_proof) - # sending request using 'sdk_' functions - request_couple = sdk_sign_and_send_prepared_request(looper, steward_wallet_handle, - sdk_pool_handle, node_request) + # sending request using 'vdr_' functions + request_couple = vdr_sign_and_send_prepared_request(looper, steward_wallet_handle, + vdr_pool_handle, node_request) if wait_till_added: # waiting for replies - sdk_get_and_check_replies(looper, [request_couple]) + vdr_get_and_check_replies(looper, [request_couple]) return create_and_start_new_node(looper, new_node_name, tdir, sigseed, (nodeIp, nodePort), (clientIp, clientPort), @@ -259,20 +264,19 @@ async def prepare_schema_request(wallet, named_seed, alias, role): pass -async def prepare_nym_request(wallet, named_seed, alias, +async def vdr_prepare_nym_request(wallet, named_seed, alias, role, dest=None, verkey=None, skipverkey=False): wh, submitter_did = wallet - (named_did, named_verkey) = \ - await create_and_store_my_did(wh, json.dumps({'seed': named_seed})) + (named_did, named_verkey) = await vdr_create_and_store_did(wh, named_seed) named_did = dest or named_did named_verkey = verkey or named_verkey named_verkey = None if skipverkey else named_verkey - nym_request = await build_nym_request(submitter_did, named_did, named_verkey, + nym_request = build_nym_request(submitter_did, named_did, named_verkey, alias, role) return nym_request, named_did -async def prepare_node_request(steward_did, new_node_name=None, clientIp=None, +def vdr_prepare_node_request(steward_did, new_node_name=None, clientIp=None, clientPort=None, nodeIp=None, nodePort=None, bls_key=None, sigseed=None, destination=None, services=[VALIDATOR], key_proof=None): @@ -302,20 +306,18 @@ async def prepare_node_request(steward_did, new_node_name=None, clientIp=None, if services is not None: data['services'] = services - node_request = await build_node_request(steward_did, destination, json.dumps(data)) + node_request = build_node_request(steward_did, destination, data) return node_request -def sdk_sign_and_send_prepared_request(looper, sdk_wallet, sdk_pool_handle, string_req): - signed_reqs = sdk_sign_request_objects(looper, sdk_wallet, - [sdk_json_to_request_object( - json.loads(string_req))]) - request_couple = 
sdk_send_signed_requests(sdk_pool_handle, signed_reqs)[0] +def vdr_sign_and_send_prepared_request(looper, vdr_wallet, vdr_pool_handle, req_obj): + signed_reqs = vdr_sign_request_objects(looper, vdr_wallet,[req_obj]) + request_couple = vdr_send_signed_requests(vdr_pool_handle, signed_reqs, looper)[0] return request_couple -def sdk_send_update_node(looper, sdk_submitter_wallet, - sdk_pool_handle, +def vdr_send_update_node(looper, vdr_submitter_wallet, + vdr_pool_handle, destination, alias, node_ip, node_port, client_ip, client_port, @@ -323,10 +325,9 @@ def sdk_send_update_node(looper, sdk_submitter_wallet, bls_key=None, key_proof=None, pool_refresh=True): - _, submitter_did = sdk_submitter_wallet + _, submitter_did = vdr_submitter_wallet # filling node request - node_request = looper.loop.run_until_complete( - prepare_node_request(submitter_did, + node_request = vdr_prepare_node_request(submitter_did, new_node_name=alias, clientIp=client_ip, clientPort=client_port, @@ -335,25 +336,24 @@ def sdk_send_update_node(looper, sdk_submitter_wallet, bls_key=bls_key, destination=destination, services=services, - key_proof=key_proof)) + key_proof=key_proof) - # sending request using 'sdk_' functions - request_couple = sdk_sign_and_send_prepared_request(looper, sdk_submitter_wallet, - sdk_pool_handle, node_request) + # sending request using 'vdr_' functions + request_couple = vdr_sign_and_send_prepared_request(looper, vdr_submitter_wallet, + vdr_pool_handle, node_request) # waitng for replies - reply = sdk_get_and_check_replies(looper, [request_couple])[0][1] + reply = vdr_get_and_check_replies(looper, [request_couple])[0][1] if pool_refresh: - sdk_pool_refresh(looper, sdk_pool_handle) + vdr_pool_refresh(looper, vdr_pool_handle) return reply -def sdk_pool_refresh(looper, sdk_pool_handle): - looper.loop.run_until_complete( - refresh_pool_ledger(sdk_pool_handle)) +def vdr_pool_refresh(looper, vdr_pool_handle): + looper.loop.run_until_complete(vdr_pool_handle.refresh()) -def sdk_build_get_txn_request(looper, steward_did, seq_no, ledger_type=None): +def vdr_build_get_txn_request(looper, steward_did, seq_no, ledger_type=None): request = looper.loop.run_until_complete( build_get_txn_request(steward_did, ledger_type, seq_no)) return request @@ -361,7 +361,7 @@ def sdk_build_get_txn_request(looper, steward_did, seq_no, ledger_type=None): def update_node_data_and_reconnect(looper, txnPoolNodeSet, steward_wallet, - sdk_pool_handle, + vdr_pool_handle, node, new_node_ip, new_node_port, new_client_ip, new_client_port, @@ -369,7 +369,7 @@ def update_node_data_and_reconnect(looper, txnPoolNodeSet, node_ha = node.nodestack.ha cli_ha = node.clientstack.ha node_dest = hexToFriendly(node.nodestack.verhex) - sdk_send_update_node(looper, steward_wallet, sdk_pool_handle, + vdr_send_update_node(looper, steward_wallet, vdr_pool_handle, node_dest, node.name, new_node_ip, new_node_port, new_client_ip, new_client_port) @@ -395,17 +395,17 @@ def update_node_data_and_reconnect(looper, txnPoolNodeSet, txnPoolNodeSet[idx] = restartedNode looper.run(checkNodesConnected(txnPoolNodeSet)) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, - steward_wallet, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, + steward_wallet, vdr_pool_handle) return restartedNode -def sdk_change_node_keys(looper, node, sdk_wallet_steward, sdk_pool_handle, +def vdr_change_node_keys(looper, node, vdr_wallet_steward, vdr_pool_handle, verkey): - _, steward_did = sdk_wallet_steward + _, steward_did = vdr_wallet_steward node_dest = 
hexToFriendly(node.nodestack.verhex) node_request = looper.loop.run_until_complete( - prepare_node_request(steward_did, + vdr_prepare_node_request(steward_did, new_node_name=node.name, destination=node_dest)) @@ -413,9 +413,9 @@ def sdk_change_node_keys(looper, node, sdk_wallet_steward, sdk_pool_handle, request_json['operation'][VERKEY] = verkey node_request1 = json.dumps(request_json) - request_couple = sdk_sign_and_send_prepared_request(looper, sdk_wallet_steward, - sdk_pool_handle, node_request1) - sdk_get_and_check_replies(looper, [request_couple]) + request_couple = vdr_sign_and_send_prepared_request(looper, vdr_wallet_steward, + vdr_pool_handle, node_request1) + vdr_get_and_check_replies(looper, [request_couple]) node.nodestack.clearLocalRoleKeep() node.nodestack.clearRemoteRoleKeeps() @@ -425,21 +425,21 @@ def sdk_change_node_keys(looper, node, sdk_wallet_steward, sdk_pool_handle, node.clientstack.clearAllDir() -def demote_node(looper, steward_wallet, sdk_pool_handle, +def demote_node(looper, steward_wallet, vdr_pool_handle, node): node_nym = hexToFriendly(node.nodestack.verhex) - sdk_send_update_node(looper, steward_wallet, - sdk_pool_handle, node_nym, node.name, + vdr_send_update_node(looper, steward_wallet, + vdr_pool_handle, node_nym, node.name, None, None, None, None, services=[]) -def promote_node(looper, steward_wallet, sdk_pool_handle, +def promote_node(looper, steward_wallet, vdr_pool_handle, node): node_nym = hexToFriendly(node.nodestack.verhex) - sdk_send_update_node(looper, steward_wallet, - sdk_pool_handle, node_nym, node.name, + vdr_send_update_node(looper, steward_wallet, + vdr_pool_handle, node_nym, node.name, None, None, None, None, services=[VALIDATOR]) @@ -520,6 +520,537 @@ def reconnect_node_and_ensure_connected(looper: Looper, looper.run(checkNodesConnected(poolNodes, customTimeout=timeout)) +def vdr_add_2_nodes(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, + tdir, tconf, allPluginsPath): + names = ("Zeta", "Eta") + new_nodes = [] + for node_name in names: + new_steward_name = "testClientSteward" + randomString(3) + new_steward_wallet, new_node = \ + vdr_add_new_steward_and_node(looper, + vdr_pool_handle, + vdr_wallet_steward, + new_steward_name, + node_name, + tdir, + tconf, + allPluginsPath) + txnPoolNodeSet.append(new_node) + looper.run(checkNodesConnected(txnPoolNodeSet)) + waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1], + exclude_from_check=['check_last_ordered_3pc_backup']) + vdr_pool_refresh(looper, vdr_pool_handle) + new_nodes.append(new_node) + return new_nodes + + +def vdr_add_new_nym_without_waiting(looper, vdr_pool_handle, creators_wallet, + alias=None, role=None, seed=None, + dest=None, verkey=None, skipverkey=False): + seed = seed or randomString(32) + alias = alias or randomString(5) + wh, _ = creators_wallet + + nym_request, new_did = looper.loop.run_until_complete( + vdr_prepare_nym_request(creators_wallet, seed, + alias, role, dest, verkey, skipverkey)) + vdr_sign_and_send_prepared_request(looper, creators_wallet, + vdr_pool_handle, nym_request) + + +####### SDk + +def sdk_new_client_request(role, name, looper, sdk_wallet): + wh, did = sdk_wallet + seed = randomString(32) + (named_did, named_verkey) = looper.loop.run_until_complete( + sdk_create_and_store_my_did(wh, json.dumps({'seed': seed}))) + nym_request = looper.loop.run_until_complete( + sdk_build_nym_request(did, named_did, named_verkey, + name, role)) + + return sdk_sign_request_strings(looper, sdk_wallet, + [json.loads(nym_request)])[0] + + +def 
sdk_prepare_new_node_data(tconf, tdir, newNodeName, configClass=PNodeConfigHelper): + sigseed = randomString(32).encode() + (nodeIp, nodePort), (clientIp, clientPort) = genHa(2) + config_helper = configClass(newNodeName, tconf, chroot=tdir) + pubkey, verkey, bls_key, key_proof = initNodeKeysForBothStacks(newNodeName, config_helper.keys_dir, + sigseed, override=True) + return sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof + + +def sdk_start_not_added_node(looper, + tdir, tconf, allPluginsPath, + newNodeName): + ''' + Creates and starts a new node, but doesn't add it to the Pool + (so, NODE txn is not sent). + ''' + sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \ + sdk_prepare_new_node_data(tconf, tdir, newNodeName) + + new_node = create_and_start_new_node(looper, newNodeName, + tdir, randomString(32).encode(), + (nodeIp, nodePort), (clientIp, clientPort), + tconf, True, allPluginsPath, TestNode) + return sigseed, bls_key, new_node, (nodeIp, nodePort), \ + (clientIp, clientPort), key_proof + + +def sdk_add_started_node(looper, + new_node, + node_ha, + client_ha, + txnPoolNodeSet, + sdk_pool_handle, + sdk_wallet_steward, + bls_key, + key_proof): + ''' + Adds already created node to the pool, + that is sends NODE txn. + Makes sure that node is actually added and connected to all otehr nodes. + ''' + new_steward_wallet_handle = sdk_add_new_nym(looper, sdk_pool_handle, + sdk_wallet_steward, + "Steward" + new_node.name, + role=STEWARD_STRING) + node_name = new_node.name + node_dest = hexToFriendly(new_node.nodestack.verhex) + sdk_send_update_node(looper, new_steward_wallet_handle, + sdk_pool_handle, node_dest, node_name, + node_ha[0], node_ha[1], + client_ha[0], client_ha[1], + services=[VALIDATOR], + bls_key=bls_key, + key_proof=key_proof) + + txnPoolNodeSet.append(new_node) + looper.run(checkNodesConnected(txnPoolNodeSet)) + sdk_pool_refresh(looper, sdk_pool_handle) + sdk_ensure_pool_functional(looper, txnPoolNodeSet, + sdk_wallet_steward, + sdk_pool_handle) + + waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1]) + + +# def create_and_start_new_node( +# looper, +# node_name, +# tdir, +# sigseed, +# node_ha, +# client_ha, +# tconf, +# auto_start, +# plugin_path, +# nodeClass, +# do_post_node_creation: Callable = None, +# configClass=PNodeConfigHelper): +# node = new_node(node_name=node_name, +# tdir=tdir, +# node_ha=node_ha, +# client_ha=client_ha, +# tconf=tconf, +# plugin_path=plugin_path, +# nodeClass=nodeClass, +# configClass=configClass) +# if do_post_node_creation: +# do_post_node_creation(node) +# if auto_start: +# looper.add(node) +# return node + + +# def new_node( +# node_name, +# tdir, +# node_ha, +# client_ha, +# tconf, +# plugin_path, +# nodeClass, +# configClass=PNodeConfigHelper): +# config_helper = configClass(node_name, tconf, chroot=tdir) +# node = nodeClass(node_name, +# config_helper=config_helper, +# config=tconf, +# ha=node_ha, cliha=client_ha, +# pluginPaths=plugin_path) +# return node + + +def sdk_add_new_steward_and_node(looper, + sdk_pool_handle, + sdk_wallet_steward, + new_steward_name, + new_node_name, + tdir, + tconf, + allPluginsPath=None, + autoStart=True, + nodeClass=TestNode, + transformNodeOpFunc=None, + do_post_node_creation: Callable = None, + services=[VALIDATOR], + wait_till_added=True): + new_steward_wallet_handle = sdk_add_new_nym(looper, + sdk_pool_handle, + sdk_wallet_steward, + alias=new_steward_name, + role=STEWARD_STRING) + new_node = sdk_add_new_node( + looper, + sdk_pool_handle, + 
new_steward_wallet_handle, + new_node_name, + tdir, + tconf, + allPluginsPath, + autoStart=autoStart, + nodeClass=nodeClass, + do_post_node_creation=do_post_node_creation, + services=services, + wait_till_added=wait_till_added) + return new_steward_wallet_handle, new_node + + +def sdk_add_new_nym(looper, sdk_pool_handle, creators_wallet, + alias=None, role=None, seed=None, + dest=None, verkey=None, skipverkey=False, no_wait=False): + seed = seed or randomString(32) + alias = alias or randomString(5) + wh, _ = creators_wallet + + # filling nym request and getting steward did + # if role == None, we are adding client + nym_request, new_did = looper.loop.run_until_complete( + sdk_prepare_nym_request(creators_wallet, seed, + alias, role, dest, verkey, skipverkey)) + + # sending request using 'sdk_' functions + request_couple = sdk_sign_and_send_prepared_request(looper, creators_wallet, + sdk_pool_handle, nym_request) + if no_wait: + return request_couple + # waitng for replies + sdk_get_and_check_replies(looper, [request_couple]) + return wh, new_did + + +def sdk_add_new_node(looper, + sdk_pool_handle, + steward_wallet_handle, + new_node_name, + tdir, tconf, + allPluginsPath=None, autoStart=True, nodeClass=TestNode, + do_post_node_creation: Callable = None, + services=[VALIDATOR], + wait_till_added=True): + nodeClass = nodeClass or TestNode + sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \ + prepare_new_node_data(tconf, tdir, new_node_name) + + # filling node request + _, steward_did = steward_wallet_handle + node_request = looper.loop.run_until_complete( + sdk_prepare_node_request(steward_did, + new_node_name=new_node_name, + clientIp=clientIp, + clientPort=clientPort, + nodeIp=nodeIp, + nodePort=nodePort, + bls_key=bls_key, + sigseed=sigseed, + services=services, + key_proof=key_proof)) + + # sending request using 'sdk_' functions + request_couple = sdk_sign_and_send_prepared_request(looper, steward_wallet_handle, + sdk_pool_handle, node_request) + + if wait_till_added: + # waiting for replies + sdk_get_and_check_replies(looper, [request_couple]) + + return create_and_start_new_node(looper, new_node_name, tdir, sigseed, + (nodeIp, nodePort), (clientIp, clientPort), + tconf, autoStart, allPluginsPath, + nodeClass, + do_post_node_creation=do_post_node_creation, + configClass=PNodeConfigHelper) + + +# async def prepare_schema_request(wallet, named_seed, alias, role): +# pass + + +async def sdk_prepare_nym_request(wallet, named_seed, alias, + role, dest=None, verkey=None, skipverkey=False): + wh, submitter_did = wallet + (named_did, named_verkey) = \ + await sdk_create_and_store_my_did(wh, json.dumps({'seed': named_seed})) + named_did = dest or named_did + named_verkey = verkey or named_verkey + named_verkey = None if skipverkey else named_verkey + nym_request = await sdk_build_nym_request(submitter_did, named_did, named_verkey, + alias, role) + return nym_request, named_did + + +async def sdk_prepare_node_request(steward_did, new_node_name=None, clientIp=None, + clientPort=None, nodeIp=None, nodePort=None, bls_key=None, + sigseed=None, destination=None, services=[VALIDATOR], + key_proof=None): + use_sigseed = sigseed is not None + use_dest = destination is not None + if use_sigseed == use_dest: + raise AttributeError('You should provide only one of: sigseed or destination') + if use_sigseed: + nodeSigner = SimpleSigner(seed=sigseed) + destination = nodeSigner.identifier + + data = {} + if new_node_name is not None: + data['alias'] = new_node_name + if clientIp is 
not None: + data['client_ip'] = clientIp + if clientPort is not None: + data['client_port'] = clientPort + if nodeIp is not None: + data['node_ip'] = nodeIp + if nodePort is not None: + data['node_port'] = nodePort + if key_proof is not None: + data['blskey_pop'] = key_proof + if bls_key is not None: + data['blskey'] = bls_key + if services is not None: + data['services'] = services + + node_request = await sdk_build_node_request(steward_did, destination, json.dumps(data)) + return node_request + + +def sdk_sign_and_send_prepared_request(looper, sdk_wallet, sdk_pool_handle, string_req): + signed_reqs = sdk_sign_request_objects(looper, sdk_wallet, + [sdk_json_to_request_object( + json.loads(string_req))]) + request_couple = sdk_send_signed_requests(looper, sdk_pool_handle, signed_reqs)[0] + return request_couple + + +def sdk_send_update_node(looper, sdk_submitter_wallet, + sdk_pool_handle, + destination, alias, + node_ip, node_port, + client_ip, client_port, + services=[VALIDATOR], + bls_key=None, + key_proof=None, + pool_refresh=True): + _, submitter_did = sdk_submitter_wallet + # filling node request + node_request = looper.loop.run_until_complete( + sdk_prepare_node_request(submitter_did, + new_node_name=alias, + clientIp=client_ip, + clientPort=client_port, + nodeIp=node_ip, + nodePort=node_port, + bls_key=bls_key, + destination=destination, + services=services, + key_proof=key_proof)) + + # sending request using 'sdk_' functions + request_couple = sdk_sign_and_send_prepared_request(looper, sdk_submitter_wallet, + sdk_pool_handle, node_request) + + # waitng for replies + reply = sdk_get_and_check_replies(looper, [request_couple])[0][1] + if pool_refresh: + sdk_pool_refresh(looper, sdk_pool_handle) + return reply + + +def sdk_pool_refresh(looper, sdk_pool_handle): + looper.loop.run_until_complete( + sdk_refresh_pool_ledger(sdk_pool_handle)) + + +def sdk_build_get_txn_request(looper, steward_did, seq_no, ledger_type=None): + request = looper.loop.run_until_complete( + build_get_sdk_txn_request(steward_did, ledger_type, seq_no)) + return request + + +def sdk_update_node_data_and_reconnect(looper, txnPoolNodeSet, + steward_wallet, + sdk_pool_handle, + node, + new_node_ip, new_node_port, + new_client_ip, new_client_port, + tdir, tconf): + node_ha = node.nodestack.ha + cli_ha = node.clientstack.ha + node_dest = hexToFriendly(node.nodestack.verhex) + sdk_send_update_node(looper, steward_wallet, sdk_pool_handle, + node_dest, node.name, + new_node_ip, new_node_port, + new_client_ip, new_client_port) + # restart the Node with new HA + node.stop() + looper.removeProdable(name=node.name) + config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir) + restartedNode = TestNode(node.name, + config_helper=config_helper, + config=tconf, + ha=HA(new_node_ip or node_ha.host, + new_node_port or node_ha.port), + cliha=HA(new_client_ip or cli_ha.host, + new_client_port or cli_ha.port)) + looper.add(restartedNode) + + # replace node in txnPoolNodeSet + try: + idx = next(i for i, n in enumerate(txnPoolNodeSet) + if n.name == node.name) + except StopIteration: + raise Exception('{} is not the pool'.format(node)) + txnPoolNodeSet[idx] = restartedNode + + looper.run(checkNodesConnected(txnPoolNodeSet)) + sdk_ensure_pool_functional(looper, txnPoolNodeSet, + steward_wallet, sdk_pool_handle) + return restartedNode + + +def sdk_change_node_keys(looper, node, sdk_wallet_steward, sdk_pool_handle, + verkey): + _, steward_did = sdk_wallet_steward + node_dest = hexToFriendly(node.nodestack.verhex) + node_request = 
looper.loop.run_until_complete( + sdk_prepare_node_request(steward_did, + new_node_name=node.name, + destination=node_dest)) + + request_json = json.loads(node_request) + request_json['operation'][VERKEY] = verkey + node_request1 = json.dumps(request_json) + + request_couple = sdk_sign_and_send_prepared_request(looper, sdk_wallet_steward, + sdk_pool_handle, node_request1) + sdk_get_and_check_replies(looper, [request_couple]) + + node.nodestack.clearLocalRoleKeep() + node.nodestack.clearRemoteRoleKeeps() + node.nodestack.clearAllDir() + node.clientstack.clearLocalRoleKeep() + node.clientstack.clearRemoteRoleKeeps() + node.clientstack.clearAllDir() + + +# def demote_node(looper, steward_wallet, sdk_pool_handle, +# node): +# node_nym = hexToFriendly(node.nodestack.verhex) +# sdk_send_update_node(looper, steward_wallet, +# sdk_pool_handle, node_nym, node.name, +# None, None, +# None, None, +# services=[]) + + +# def promote_node(looper, steward_wallet, sdk_pool_handle, +# node): +# node_nym = hexToFriendly(node.nodestack.verhex) +# sdk_send_update_node(looper, steward_wallet, +# sdk_pool_handle, node_nym, node.name, +# None, None, +# None, None, +# services=[VALIDATOR]) + + +# def disconnectPoolNode(poolNodes: Iterable, +# disconnect: Union[str, TestNode], +# stopNode=True): +# if isinstance(disconnect, TestNode): +# disconnect = disconnect.name +# assert isinstance(disconnect, str) + +# for node in poolNodes: +# if node.name == disconnect: +# if stopNode: +# node.stop() +# else: +# node.clientstack.close() +# node.nodestack.close() +# break +# else: +# raise AssertionError('The node {} which should be disconnected ' +# 'is not found in the passed pool node list {}' +# .format(disconnect, poolNodes)) + + +# def reconnectPoolNode(looper: Looper, +# poolNodes: Iterable, +# connect: Union[str, TestNode]): +# if isinstance(connect, TestNode): +# connect = connect.name +# assert isinstance(connect, str) + +# for node in poolNodes: +# if node.name == connect: +# if node.isGoing(): +# node.nodestack.open() +# node.clientstack.open() +# node.nodestack.maintainConnections(force=True) +# else: +# node.start(looper) +# break +# else: +# raise AssertionError('The node {} which should be reconnected ' +# 'is not found in the passed pool node list {}' +# .format(connect, poolNodes)) + + +# def disconnect_node_and_ensure_disconnected(looper: Looper, +# poolNodes: Iterable[TestNode], +# disconnect: Union[str, TestNode], +# timeout=None, +# stopNode=True): +# if isinstance(disconnect, TestNode): +# disconnect = disconnect.name +# assert isinstance(disconnect, str) + +# matches = [n for n in poolNodes if n.name == disconnect] +# assert len(matches) == 1 +# node_to_disconnect = matches[0] + +# disconnectPoolNode(poolNodes, disconnect, stopNode=stopNode) +# ensure_node_disconnected(looper, +# node_to_disconnect, +# set(poolNodes) - {node_to_disconnect}, +# timeout=timeout) + + +# def reconnect_node_and_ensure_connected(looper: Looper, +# poolNodes: Iterable[TestNode], +# connect: Union[str, TestNode], +# timeout=None): +# if isinstance(connect, TestNode): +# connect = connect.name +# assert isinstance(connect, str) + +# reconnectPoolNode(looper, poolNodes, connect) +# looper.run(checkNodesConnected(poolNodes, customTimeout=timeout)) + + def sdk_add_2_nodes(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, tdir, tconf, allPluginsPath): @@ -553,7 +1084,7 @@ def sdk_add_new_nym_without_waiting(looper, sdk_pool_handle, creators_wallet, wh, _ = creators_wallet nym_request, new_did = 
looper.loop.run_until_complete( - prepare_nym_request(creators_wallet, seed, + sdk_prepare_nym_request(creators_wallet, seed, alias, role, dest, verkey, skipverkey)) sdk_sign_and_send_prepared_request(looper, creators_wallet, - sdk_pool_handle, nym_request) + sdk_pool_handle, nym_request) \ No newline at end of file diff --git a/plenum/test/pool_transactions/test_add_inactive_node_then_activate.py b/plenum/test/pool_transactions/test_add_inactive_node_then_activate.py index 664830f971..041b62815f 100644 --- a/plenum/test/pool_transactions/test_add_inactive_node_then_activate.py +++ b/plenum/test/pool_transactions/test_add_inactive_node_then_activate.py @@ -1,8 +1,8 @@ import pytest -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.common.util import randomString -from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node, sdk_pool_refresh, \ +from plenum.test.pool_transactions.helper import vdr_add_new_steward_and_node, vdr_pool_refresh, \ update_node_data_and_reconnect from plenum.test.test_node import checkNodesConnected from stp_core.common.log import getlogger @@ -12,17 +12,17 @@ def testAddInactiveNodeThenActivate(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle, tdir, tconf, allPluginsPath): + vdr_wallet_steward, + vdr_pool_handle, tdir, tconf, allPluginsPath): new_steward_name = "testClientSteward" + randomString(3) new_node_name = "Kappa" # adding a new node without SERVICES field # it means the node is in the inactive state new_steward_wallet, new_node = \ - sdk_add_new_steward_and_node(looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_add_new_steward_and_node(looper, + vdr_pool_handle, + vdr_wallet_steward, new_steward_name, new_node_name, tdir, @@ -30,13 +30,13 @@ def testAddInactiveNodeThenActivate(looper, txnPoolNodeSet, allPluginsPath, services=None) looper.run(checkNodesConnected(txnPoolNodeSet)) - sdk_pool_refresh(looper, sdk_pool_handle) + vdr_pool_refresh(looper, vdr_pool_handle) new_node = update_node_data_and_reconnect(looper, txnPoolNodeSet + [new_node], new_steward_wallet, - sdk_pool_handle, + vdr_pool_handle, new_node, None, None, None, None, tdir, tconf) txnPoolNodeSet.append(new_node) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet, vdr_pool_handle) diff --git a/plenum/test/pool_transactions/test_add_node_with_invalid_data.py b/plenum/test/pool_transactions/test_add_node_with_invalid_data.py index bb08f5deae..a10c419be1 100644 --- a/plenum/test/pool_transactions/test_add_node_with_invalid_data.py +++ b/plenum/test/pool_transactions/test_add_node_with_invalid_data.py @@ -1,12 +1,12 @@ import pytest -from plenum.test.helper import sdk_get_bad_response +from plenum.test.helper import vdr_get_bad_response from plenum.common.constants import VALIDATOR from plenum.common.util import randomString, hexToFriendly from plenum.common.exceptions import RequestRejectedException -from plenum.test.pool_transactions.helper import sdk_add_new_nym, sdk_sign_and_send_prepared_request, \ - prepare_new_node_data, prepare_node_request, sdk_send_update_node +from plenum.test.pool_transactions.helper import vdr_add_new_nym, vdr_sign_and_send_prepared_request, \ + prepare_new_node_data, vdr_prepare_node_request, vdr_send_update_node def create_specific_node_request(looper, steward_wallet_handle, @@ -23,7 +23,7 @@ def 
create_specific_node_request(looper, steward_wallet_handle, _, steward_did = steward_wallet_handle node_request = looper.loop.run_until_complete( - prepare_node_request(steward_did, + vdr_prepare_node_request(steward_did, new_node_name=new_node_name, clientIp=client_ip, clientPort=client_port, @@ -40,13 +40,13 @@ def test_add_node_with_existing_data(looper, txnPoolNodeSet, tdir, tconf, - sdk_pool_handle, - sdk_wallet_stewards): + vdr_pool_handle, + vdr_wallet_stewards): alias = randomString(5) new_node_name = "Node-" + alias - steward_wallet_handle = sdk_add_new_nym(looper, - sdk_pool_handle, - sdk_wallet_stewards[0], + steward_wallet_handle = vdr_add_new_nym(looper, + vdr_pool_handle, + vdr_wallet_stewards[0], alias="Steward-" + alias, role='STEWARD') # Setting already existing HAs @@ -56,39 +56,39 @@ def test_add_node_with_existing_data(looper, # Check for existing alias node_request = create_specific_node_request( looper, steward_wallet_handle, tconf, tdir, txnPoolNodeSet[0].name) - request_couple = sdk_sign_and_send_prepared_request(looper, steward_wallet_handle, - sdk_pool_handle, node_request) - sdk_get_bad_response(looper, [request_couple], RequestRejectedException, + request_couple = vdr_sign_and_send_prepared_request(looper, steward_wallet_handle, + vdr_pool_handle, node_request) + vdr_get_bad_response(looper, [request_couple], RequestRejectedException, "Node's alias must be unique") # Check for existing node HAs node_request = create_specific_node_request( looper, steward_wallet_handle, tconf, tdir, new_node_name, new_node_ip=existing_ha[0], new_node_port=existing_ha[1]) - request_couple = sdk_sign_and_send_prepared_request(looper, steward_wallet_handle, - sdk_pool_handle, node_request) - sdk_get_bad_response(looper, [request_couple], RequestRejectedException, + request_couple = vdr_sign_and_send_prepared_request(looper, steward_wallet_handle, + vdr_pool_handle, node_request) + vdr_get_bad_response(looper, [request_couple], RequestRejectedException, "Node's nodestack addresses must be unique") # Check for existing client HAs node_request = create_specific_node_request( looper, steward_wallet_handle, tconf, tdir, new_node_name, new_client_ip=existing_cli_ha[0], new_client_port=existing_cli_ha[1]) - request_couple = sdk_sign_and_send_prepared_request(looper, steward_wallet_handle, - sdk_pool_handle, node_request) - sdk_get_bad_response(looper, [request_couple], RequestRejectedException, + request_couple = vdr_sign_and_send_prepared_request(looper, steward_wallet_handle, + vdr_pool_handle, node_request) + vdr_get_bad_response(looper, [request_couple], RequestRejectedException, "Node's clientstack addresses must be unique") def test_try_change_node_alias(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_stewards): + vdr_pool_handle, + vdr_wallet_stewards): node = txnPoolNodeSet[1] node_dest = hexToFriendly(node.nodestack.verhex) with pytest.raises(RequestRejectedException) as e: - sdk_send_update_node(looper, sdk_wallet_stewards[1], - sdk_pool_handle, + vdr_send_update_node(looper, vdr_wallet_stewards[1], + vdr_pool_handle, node_dest, node.name + '-foo', None, None, None, None, diff --git a/plenum/test/pool_transactions/test_add_node_with_invalid_key_proof.py b/plenum/test/pool_transactions/test_add_node_with_invalid_key_proof.py index f9c34fdd3f..56c36c6adf 100644 --- a/plenum/test/pool_transactions/test_add_node_with_invalid_key_proof.py +++ b/plenum/test/pool_transactions/test_add_node_with_invalid_key_proof.py @@ -1,14 +1,14 @@ import pytest from plenum.common.exceptions 
import RequestNackedException -from plenum.test.helper import sdk_get_and_check_replies +from plenum.test.helper import vdr_get_and_check_replies from plenum.common.constants import VALIDATOR from plenum.test.pool_transactions.helper import prepare_new_node_data, \ - prepare_node_request, sdk_sign_and_send_prepared_request + vdr_prepare_node_request, vdr_sign_and_send_prepared_request def test_add_node_with_invalid_key_proof(looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath): new_node_name = "NewNode" @@ -18,9 +18,9 @@ def test_add_node_with_invalid_key_proof(looper, key_proof = 'AAAAA' + key_proof[5:] # filling node request - _, steward_did = sdk_wallet_steward + _, steward_did = vdr_wallet_steward node_request = looper.loop.run_until_complete( - prepare_node_request(steward_did, + vdr_prepare_node_request(steward_did, new_node_name=new_node_name, clientIp=clientIp, clientPort=clientPort, @@ -32,14 +32,14 @@ def test_add_node_with_invalid_key_proof(looper, key_proof=key_proof)) # sending request using 'sdk_' functions - request_couple = sdk_sign_and_send_prepared_request(looper, - sdk_wallet_steward, - sdk_pool_handle, + request_couple = vdr_sign_and_send_prepared_request(looper, + vdr_wallet_steward, + vdr_pool_handle, node_request) # waitng for replies with pytest.raises(RequestNackedException) as e: - sdk_get_and_check_replies(looper, [request_couple]) + vdr_get_and_check_replies(looper, [request_couple]) assert "Proof of possession {} " \ "is incorrect for BLS key {}".format(key_proof, bls_key) \ in e._excinfo[1].args[0] diff --git a/plenum/test/pool_transactions/test_add_node_with_not_unique_alias.py b/plenum/test/pool_transactions/test_add_node_with_not_unique_alias.py index 107d9303ba..764108679d 100644 --- a/plenum/test/pool_transactions/test_add_node_with_not_unique_alias.py +++ b/plenum/test/pool_transactions/test_add_node_with_not_unique_alias.py @@ -1,24 +1,24 @@ import pytest from plenum.common.exceptions import RequestRejectedException -from plenum.test.pool_transactions.helper import sdk_add_new_nym, sdk_add_new_node, sdk_pool_refresh +from plenum.test.pool_transactions.helper import vdr_add_new_nym, vdr_add_new_node, vdr_pool_refresh def test_add_node_with_not_unique_alias(looper, tdir, tconf, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, allPluginsPath): new_node_name = "Alpha" - new_steward_wallet, steward_did = sdk_add_new_nym(looper, - sdk_pool_handle, - sdk_wallet_steward, + new_steward_wallet, steward_did = vdr_add_new_nym(looper, + vdr_pool_handle, + vdr_wallet_steward, alias="TEST_STEWARD1", role='STEWARD') with pytest.raises(RequestRejectedException) as e: - sdk_add_new_node(looper, - sdk_pool_handle, + vdr_add_new_node(looper, + vdr_pool_handle, (new_steward_wallet, steward_did), new_node_name, tdir, @@ -26,4 +26,4 @@ def test_add_node_with_not_unique_alias(looper, allPluginsPath) assert 'existing data has conflicts with request data' in \ e._excinfo[1].args[0] - sdk_pool_refresh(looper, sdk_pool_handle) \ No newline at end of file + vdr_pool_refresh(looper, vdr_pool_handle) \ No newline at end of file diff --git a/plenum/test/pool_transactions/test_add_stewards_and_client.py b/plenum/test/pool_transactions/test_add_stewards_and_client.py index f1d1d113f2..cd72fa15dc 100644 --- a/plenum/test/pool_transactions/test_add_stewards_and_client.py +++ b/plenum/test/pool_transactions/test_add_stewards_and_client.py @@ -9,14 +9,14 @@ from plenum.common.signer_simple 
import SimpleSigner from plenum.common.util import randomString from plenum.test import waits -from plenum.test.helper import sdk_get_and_check_replies -from plenum.test.pool_transactions.helper import prepare_new_node_data, prepare_node_request, \ - sdk_sign_and_send_prepared_request +from plenum.test.helper import vdr_get_and_check_replies +from plenum.test.pool_transactions.helper import prepare_new_node_data, vdr_prepare_node_request, \ + vdr_sign_and_send_prepared_request from stp_core.loop.eventually import eventually -def testAddNewClient(looper, txnPoolNodeSet, sdk_wallet_new_client): - _, did = sdk_wallet_new_client +def testAddNewClient(looper, txnPoolNodeSet, vdr_wallet_new_client): + _, did = vdr_wallet_new_client def chk(): for node in txnPoolNodeSet: @@ -29,8 +29,8 @@ def chk(): def testStewardCannotAddNodeWithNonBase58VerKey(looper, tdir, tconf, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_new_steward): + vdr_pool_handle, + vdr_wallet_new_steward): """ The Case: Steward accidentally sends the NODE txn with a non base58 verkey. @@ -41,9 +41,9 @@ def testStewardCannotAddNodeWithNonBase58VerKey(looper, tdir, tconf, sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \ prepare_new_node_data(tconf, tdir, new_node_name) - _, steward_did = sdk_wallet_new_steward + _, steward_did = vdr_wallet_new_steward node_request = looper.loop.run_until_complete( - prepare_node_request(steward_did, + vdr_prepare_node_request(steward_did, new_node_name=new_node_name, clientIp=clientIp, clientPort=clientPort, @@ -63,19 +63,19 @@ def testStewardCannotAddNodeWithNonBase58VerKey(looper, tdir, tconf, request_json['operation'][TARGET_NYM] = hexVerKey node_request = json.dumps(request_json) - request_couple = sdk_sign_and_send_prepared_request(looper, - sdk_wallet_new_steward, - sdk_pool_handle, + request_couple = vdr_sign_and_send_prepared_request(looper, + vdr_wallet_new_steward, + vdr_pool_handle, node_request) with pytest.raises(RequestNackedException) as e: - sdk_get_and_check_replies(looper, [request_couple]) + vdr_get_and_check_replies(looper, [request_couple]) assert 'client request invalid' in e._excinfo[1].args[0] def testStewardCannotAddNodeWithInvalidHa(looper, tdir, tconf, txnPoolNodeSet, - sdk_wallet_new_steward, - sdk_pool_handle): + vdr_wallet_new_steward, + vdr_pool_handle): """ The case: Steward accidentally sends the NODE txn with an invalid HA. @@ -86,9 +86,9 @@ def testStewardCannotAddNodeWithInvalidHa(looper, tdir, tconf, sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \ prepare_new_node_data(tconf, tdir, new_node_name) - _, steward_did = sdk_wallet_new_steward + _, steward_did = vdr_wallet_new_steward node_request = looper.loop.run_until_complete( - prepare_node_request(steward_did, + vdr_prepare_node_request(steward_did, new_node_name=new_node_name, clientIp=clientIp, clientPort=clientPort, @@ -113,14 +113,14 @@ def testStewardCannotAddNodeWithInvalidHa(looper, tdir, tconf, request_json = json.loads(node_request) request_json['operation'][DATA][field] = value node_request1 = json.dumps(request_json) - request_couple = sdk_sign_and_send_prepared_request(looper, - sdk_wallet_new_steward, - sdk_pool_handle, + request_couple = vdr_sign_and_send_prepared_request(looper, + vdr_wallet_new_steward, + vdr_pool_handle, node_request1) # wait NAcks with exact message. 
it does not works for just 'is invalid' # because the 'is invalid' will check only first few cases with pytest.raises(RequestNackedException) as e: - sdk_get_and_check_replies(looper, [request_couple]) + vdr_get_and_check_replies(looper, [request_couple]) assert 'invalid network ip address' in e._excinfo[1].args[0] or \ 'expected types' in e._excinfo[1].args[0] or \ 'network port out of the range' in e._excinfo[1].args[0] \ No newline at end of file diff --git a/plenum/test/pool_transactions/test_adding_stewards.py b/plenum/test/pool_transactions/test_adding_stewards.py index c7cc9282c0..c3fbae9f19 100644 --- a/plenum/test/pool_transactions/test_adding_stewards.py +++ b/plenum/test/pool_transactions/test_adding_stewards.py @@ -3,9 +3,9 @@ from plenum.common.constants import STEWARD_STRING from plenum.common.exceptions import RequestRejectedException from plenum.common.util import randomString -from plenum.test.helper import sdk_get_replies, sdk_eval_timeout, sdk_check_reply -from plenum.test.pool_transactions.helper import sdk_add_new_nym, \ - prepare_nym_request, sdk_sign_and_send_prepared_request +from plenum.test.helper import vdr_get_replies, vdr_eval_timeout, vdr_check_reply +from plenum.test.pool_transactions.helper import vdr_add_new_nym, \ + vdr_prepare_nym_request, vdr_sign_and_send_prepared_request @pytest.fixture(scope="module") @@ -22,37 +22,37 @@ def reset(): def testOnlyAStewardCanAddAnotherSteward(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, - sdk_wallet_client): - sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, + vdr_wallet_client): + vdr_add_new_nym(looper, vdr_pool_handle, vdr_wallet_steward, alias='testSteward' + randomString(3), role=STEWARD_STRING) seed = randomString(32) - wh, _ = sdk_wallet_client + wh, _ = vdr_wallet_client nym_request, steward_did = looper.loop.run_until_complete( - prepare_nym_request(sdk_wallet_client, seed, + vdr_prepare_nym_request(vdr_wallet_client, seed, 'testSteward2', 'STEWARD')) - request_couple = sdk_sign_and_send_prepared_request(looper, sdk_wallet_client, - sdk_pool_handle, nym_request) - total_timeout = sdk_eval_timeout(1, len(txnPoolNodeSet)) - request_couple = sdk_get_replies(looper, [request_couple], total_timeout)[0] + request_couple = vdr_sign_and_send_prepared_request(looper, vdr_wallet_client, + vdr_pool_handle, nym_request) + total_timeout = vdr_eval_timeout(1, len(txnPoolNodeSet)) + request_couple = vdr_get_replies(looper, [request_couple], total_timeout)[0] with pytest.raises(RequestRejectedException) as e: - sdk_check_reply(request_couple) + vdr_check_reply(request_couple) assert 'Only Steward is allowed to do these transactions' in e._excinfo[1].args[0] def testStewardsCanBeAddedOnlyTillAThresholdIsReached(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tconf): - sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_steward, + vdr_add_new_nym(looper, vdr_pool_handle, vdr_wallet_steward, alias='testSteward' + randomString(3), role=STEWARD_STRING) with pytest.raises(RequestRejectedException) as e: - sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_steward, + vdr_add_new_nym(looper, vdr_pool_handle, vdr_wallet_steward, alias='testSteward' + randomString(3), role=STEWARD_STRING) error_message = 'New stewards cannot be added by other stewards as there ' \ 'are already {} stewards in the system'.format(tconf.stewardThreshold) diff --git 
a/plenum/test/pool_transactions/test_change_ha_persists_post_nodes_restart.py b/plenum/test/pool_transactions/test_change_ha_persists_post_nodes_restart.py index 1edd3cfe81..c400c43049 100644 --- a/plenum/test/pool_transactions/test_change_ha_persists_post_nodes_restart.py +++ b/plenum/test/pool_transactions/test_change_ha_persists_post_nodes_restart.py @@ -1,9 +1,9 @@ from plenum.common.util import hexToFriendly, randomString from stp_core.common.log import getlogger from plenum.test.node_catchup.helper import waitNodeDataEquality -from plenum.test.node_request.helper import sdk_ensure_pool_functional -from plenum.test.pool_transactions.helper import sdk_send_update_node, sdk_pool_refresh, \ - sdk_add_new_steward_and_node +from plenum.test.node_request.helper import vdr_ensure_pool_functional +from plenum.test.pool_transactions.helper import vdr_send_update_node, vdr_pool_refresh, \ + vdr_add_new_steward_and_node from plenum.test.test_node import TestNode, checkNodesConnected from stp_core.network.port_dispenser import genHa from plenum.common.config_helper import PNodeConfigHelper @@ -13,20 +13,20 @@ def testChangeHaPersistsPostNodesRestart(looper, txnPoolNodeSet, tdir, tconf, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_steward): new_steward_wallet, new_node = \ - sdk_add_new_steward_and_node(looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_add_new_steward_and_node(looper, + vdr_pool_handle, + vdr_wallet_steward, 'AnotherSteward' + randomString(4), 'AnotherNode' + randomString(4), tdir, tconf) txnPoolNodeSet.append(new_node) looper.run(checkNodesConnected(txnPoolNodeSet)) - sdk_pool_refresh(looper, sdk_pool_handle) + vdr_pool_refresh(looper, vdr_pool_handle) node_new_ha, client_new_ha = genHa(2) logger.debug("{} changing HAs to {} {}".format(new_node, node_new_ha, @@ -34,7 +34,7 @@ def testChangeHaPersistsPostNodesRestart(looper, txnPoolNodeSet, # Making the change HA txn an confirming its succeeded node_dest = hexToFriendly(new_node.nodestack.verhex) - sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle, + vdr_send_update_node(looper, new_steward_wallet, vdr_pool_handle, node_dest, new_node.name, node_new_ha.host, node_new_ha.port, client_new_ha.host, client_new_ha.port) @@ -67,5 +67,5 @@ def testChangeHaPersistsPostNodesRestart(looper, txnPoolNodeSet, looper.run(checkNodesConnected(restartedNodes)) waitNodeDataEquality(looper, node, *restartedNodes[:-1]) - sdk_pool_refresh(looper, sdk_pool_handle) - sdk_ensure_pool_functional(looper, restartedNodes, sdk_wallet_client, sdk_pool_handle) + vdr_pool_refresh(looper, vdr_pool_handle) + vdr_ensure_pool_functional(looper, restartedNodes, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/pool_transactions/test_client_with_pool_txns.py b/plenum/test/pool_transactions/test_client_with_pool_txns.py index 6748061d82..342050892d 100644 --- a/plenum/test/pool_transactions/test_client_with_pool_txns.py +++ b/plenum/test/pool_transactions/test_client_with_pool_txns.py @@ -1,9 +1,9 @@ -from plenum.test.pool_transactions.helper import sdk_pool_refresh +from plenum.test.pool_transactions.helper import vdr_pool_refresh from stp_core.loop.eventually import eventually from stp_core.common.log import getlogger from plenum.test import waits -from plenum.test.helper import sdk_send_random_and_check -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.helper import vdr_send_random_and_check +from 
plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.test_node import checkNodesConnected, TestNode, \ ensureElectionsDone from plenum.common.config_helper import PNodeConfigHelper @@ -14,9 +14,9 @@ def testClientConnectToRestartedNodes(looper, txnPoolNodeSet, tdir, tconf, poolTxnNodeNames, allPluginsPath, - sdk_wallet_new_client, - sdk_pool_handle): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_new_client, 1) + vdr_wallet_new_client, + vdr_pool_handle): + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_new_client, 1) for node in txnPoolNodeSet: node.stop() looper.removeProdable(node) @@ -39,5 +39,5 @@ def chk(): timeout = waits.expectedPoolGetReadyTimeout(len(txnPoolNodeSet)) looper.run(eventually(chk, retryWait=1, timeout=timeout)) - sdk_pool_refresh(looper, sdk_pool_handle) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_new_client, sdk_pool_handle) + vdr_pool_refresh(looper, vdr_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_new_client, vdr_pool_handle) diff --git a/plenum/test/pool_transactions/test_demote_nonexisted.py b/plenum/test/pool_transactions/test_demote_nonexisted.py index 7087f91b69..c79d9ef9e0 100644 --- a/plenum/test/pool_transactions/test_demote_nonexisted.py +++ b/plenum/test/pool_transactions/test_demote_nonexisted.py @@ -1,9 +1,9 @@ from plenum.common.signer_simple import SimpleSigner -from plenum.test.pool_transactions.helper import prepare_node_request,\ - sdk_sign_and_send_prepared_request, sdk_pool_refresh +from plenum.test.pool_transactions.helper import vdr_prepare_node_request,\ + vdr_sign_and_send_prepared_request, vdr_pool_refresh from plenum.common.constants import VALIDATOR from plenum.common.util import randomString -from plenum.test.helper import sdk_get_and_check_replies +from plenum.test.helper import vdr_get_and_check_replies from stp_core.network.port_dispenser import genHa @@ -14,22 +14,22 @@ def add_ne_node(looper, sdk_pool_handle, steward_wallet_handle): (nodeIp, nodePort), (clientIp, clientPort) = genHa(2) _, steward_did = steward_wallet_handle node_request = looper.loop.run_until_complete( - prepare_node_request(steward_did, new_node_name=new_node_name, destination=dest, clientIp=clientIp, + vdr_prepare_node_request(steward_did, new_node_name=new_node_name, destination=dest, clientIp=clientIp, clientPort=clientPort, nodeIp=nodeIp, nodePort=nodePort, services=[VALIDATOR])) - request_couple = sdk_sign_and_send_prepared_request(looper, steward_wallet_handle, sdk_pool_handle, node_request) - sdk_get_and_check_replies(looper, [request_couple]) + request_couple = vdr_sign_and_send_prepared_request(looper, steward_wallet_handle, sdk_pool_handle, node_request) + vdr_get_and_check_replies(looper, [request_couple]) return dest, new_node_name -def test_demote_nonexisted(looper, txnPoolNodeSet, sdk_pool_handle, tdir, tconf, sdk_wallet_new_steward): - dst, name = add_ne_node(looper, sdk_pool_handle, sdk_wallet_new_steward) +def test_demote_nonexisted(looper, txnPoolNodeSet, vdr_pool_handle, tdir, tconf, vdr_wallet_new_steward): + dst, name = add_ne_node(looper, vdr_pool_handle, vdr_wallet_new_steward) assert dst - sdk_pool_refresh(looper, sdk_pool_handle) + vdr_pool_refresh(looper, vdr_pool_handle) assert len(txnPoolNodeSet[0].nodeReg) == len(txnPoolNodeSet) + 1 - _, st_did = sdk_wallet_new_steward + _, st_did = vdr_wallet_new_steward node_request = looper.loop.run_until_complete( - prepare_node_request(st_did, 
destination=dst, new_node_name=name, services=[])) + vdr_prepare_node_request(st_did, destination=dst, new_node_name=name, services=[])) - request_couple = sdk_sign_and_send_prepared_request(looper, sdk_wallet_new_steward, - sdk_pool_handle, node_request) - sdk_get_and_check_replies(looper, [request_couple]) + request_couple = vdr_sign_and_send_prepared_request(looper, vdr_wallet_new_steward, + vdr_pool_handle, node_request) + vdr_get_and_check_replies(looper, [request_couple]) diff --git a/plenum/test/pool_transactions/test_get_txn_request.py b/plenum/test/pool_transactions/test_get_txn_request.py index 76359ec7b8..afb531518f 100644 --- a/plenum/test/pool_transactions/test_get_txn_request.py +++ b/plenum/test/pool_transactions/test_get_txn_request.py @@ -8,10 +8,10 @@ from plenum.common.exceptions import RequestNackedException from plenum.common.txn_util import get_seq_no from plenum.test.pool_transactions.helper import \ - sdk_sign_and_send_prepared_request, prepare_nym_request, \ - sdk_build_get_txn_request + vdr_sign_and_send_prepared_request, vdr_prepare_nym_request, \ + vdr_build_get_txn_request from stp_core.loop.eventually import eventually -from plenum.test.helper import sdk_get_and_check_replies +from plenum.test.helper import vdr_get_and_check_replies from plenum.common.util import getMaxFailures, randomString c_delay = 10 @@ -24,10 +24,10 @@ def test_get_txn_for_invalid_ledger_id(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle): - _, steward_did = sdk_wallet_steward - request = sdk_build_get_txn_request(looper, steward_did, 1) + vdr_wallet_steward, + vdr_pool_handle): + _, steward_did = vdr_wallet_steward + request = vdr_build_get_txn_request(looper, steward_did, 1) # setting incorrect Ledger_ID request_json = json.loads(request) @@ -35,40 +35,40 @@ def test_get_txn_for_invalid_ledger_id(looper, txnPoolNodeSet, request = json.dumps(request_json) request_couple = \ - sdk_sign_and_send_prepared_request(looper, - sdk_wallet_steward, - sdk_pool_handle, + vdr_sign_and_send_prepared_request(looper, + vdr_wallet_steward, + vdr_pool_handle, request) with pytest.raises(RequestNackedException) as e: - sdk_get_and_check_replies(looper, [request_couple]) + vdr_get_and_check_replies(looper, [request_couple]) assert 'expected one of' in e._excinfo[1].args[0] def test_get_txn_for_invalid_seq_no(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle): - _, steward_did = sdk_wallet_steward + vdr_wallet_steward, + vdr_pool_handle): + _, steward_did = vdr_wallet_steward # setting incorrect data - request = sdk_build_get_txn_request(looper, steward_did, + request = vdr_build_get_txn_request(looper, steward_did, INVALID_SEQ_NO) request_couple = \ - sdk_sign_and_send_prepared_request(looper, - sdk_wallet_steward, - sdk_pool_handle, + vdr_sign_and_send_prepared_request(looper, + vdr_wallet_steward, + vdr_pool_handle, request) with pytest.raises(RequestNackedException) as e: - sdk_get_and_check_replies(looper, [request_couple]) + vdr_get_and_check_replies(looper, [request_couple]) assert 'cannot be smaller' in e._excinfo[1].args[0] def test_get_txn_for_existing_seq_no(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle): - _, steward_did = sdk_wallet_steward + vdr_wallet_steward, + vdr_pool_handle): + _, steward_did = vdr_wallet_steward for i in range(2): - request = sdk_build_get_txn_request(looper, steward_did, 1) + request = vdr_build_get_txn_request(looper, steward_did, 1) # Check with and without ledger id request_json = json.loads(request) @@ -76,63 +76,63 @@ 
def test_get_txn_for_existing_seq_no(looper, txnPoolNodeSet, request_json['operation']['ledgerId'] = 1 request = json.dumps(request_json) - sdk_sign_and_send_prepared_request(looper, - sdk_wallet_steward, - sdk_pool_handle, + vdr_sign_and_send_prepared_request(looper, + vdr_wallet_steward, + vdr_pool_handle, request) def test_get_txn_for_non_existing_seq_no(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle): - _, steward_did = sdk_wallet_steward + vdr_wallet_steward, + vdr_pool_handle): + _, steward_did = vdr_wallet_steward # setting incorrect data def generate_non_existing_seq_no(): return randint(500, 1000) - request = sdk_build_get_txn_request(looper, steward_did, + request = vdr_build_get_txn_request(looper, steward_did, generate_non_existing_seq_no()) request_couple = \ - sdk_sign_and_send_prepared_request(looper, - sdk_wallet_steward, - sdk_pool_handle, + vdr_sign_and_send_prepared_request(looper, + vdr_wallet_steward, + vdr_pool_handle, request) - reply = sdk_get_and_check_replies(looper, [request_couple])[0][1] + reply = vdr_get_and_check_replies(looper, [request_couple])[0][1] assert reply['result'][DATA] is None def test_get_txn_response_as_expected(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_steward): seed = randomString(32) - wh, _ = sdk_wallet_steward + wh, _ = vdr_wallet_steward # filling nym request and getting steward did # if role == None, we are adding client nym_request, new_did = looper.loop.run_until_complete( - prepare_nym_request(sdk_wallet_steward, seed, + vdr_prepare_nym_request(vdr_wallet_steward, seed, None, None)) # sending request using 'sdk_' functions - request_couple = sdk_sign_and_send_prepared_request( - looper, sdk_wallet_steward, - sdk_pool_handle, nym_request) + request_couple = vdr_sign_and_send_prepared_request( + looper, vdr_wallet_steward, + vdr_pool_handle, nym_request) - result1 = sdk_get_and_check_replies(looper, + result1 = vdr_get_and_check_replies(looper, [request_couple])[0][1]['result'] seqNo = get_seq_no(result1) - _, steward_did = sdk_wallet_steward - request = sdk_build_get_txn_request(looper, steward_did, seqNo) + _, steward_did = vdr_wallet_steward + request = vdr_build_get_txn_request(looper, steward_did, seqNo) request_couple = \ - sdk_sign_and_send_prepared_request(looper, - sdk_wallet_steward, - sdk_pool_handle, + vdr_sign_and_send_prepared_request(looper, + vdr_wallet_steward, + vdr_pool_handle, request) - result2 = sdk_get_and_check_replies(looper, + result2 = vdr_get_and_check_replies(looper, [request_couple])[0][1]['result'] assert result1['reqSignature'] == result2['data']['reqSignature'] diff --git a/plenum/test/pool_transactions/test_nodes_data_changed.py b/plenum/test/pool_transactions/test_nodes_data_changed.py index 050c5a9473..486bf8108d 100644 --- a/plenum/test/pool_transactions/test_nodes_data_changed.py +++ b/plenum/test/pool_transactions/test_nodes_data_changed.py @@ -3,12 +3,12 @@ from plenum.common.exceptions import RequestRejectedException, \ RequestNackedException from plenum.common.keygen_utils import init_bls_keys -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.common.constants import CLIENT_STACK_SUFFIX from plenum.common.util import randomString, hexToFriendly -from plenum.test.pool_transactions.helper import sdk_send_update_node, \ - sdk_add_new_steward_and_node, sdk_pool_refresh, \ +from plenum.test.pool_transactions.helper import 
vdr_send_update_node, \ + vdr_add_new_steward_and_node, vdr_pool_refresh, \ update_node_data_and_reconnect, demote_node from plenum.test.test_node import checkNodesConnected @@ -24,7 +24,7 @@ def test_node_alias_cannot_be_changed(looper, txnPoolNodeSet, - sdk_pool_handle, + vdr_pool_handle, sdk_node_theta_added): """ The node alias cannot be changed. @@ -32,17 +32,17 @@ def test_node_alias_cannot_be_changed(looper, txnPoolNodeSet, new_steward_wallet, new_node = sdk_node_theta_added node_dest = hexToFriendly(new_node.nodestack.verhex) with pytest.raises(RequestRejectedException) as e: - sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle, + vdr_send_update_node(looper, new_steward_wallet, vdr_pool_handle, node_dest, 'foo', None, None, None, None) assert 'data has conflicts with request data' in e._excinfo[1].args[0] - sdk_pool_refresh(looper, sdk_pool_handle) + vdr_pool_refresh(looper, vdr_pool_handle) def testNodePortChanged(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle, + vdr_wallet_steward, + vdr_pool_handle, sdk_node_theta_added, tdir, tconf): """ @@ -58,19 +58,19 @@ def testNodePortChanged(looper, txnPoolNodeSet, update_node_data_and_reconnect(looper, txnPoolNodeSet, new_steward_wallet, - sdk_pool_handle, + vdr_pool_handle, new_node, node_ha.host, new_port, cli_ha.host, cli_ha.port, tdir, tconf) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet, vdr_pool_handle) # Make sure that no additional view changes happened assert all(n.viewNo == orig_view_no for n in txnPoolNodeSet) def test_fail_node_bls_key_validation(looper, - sdk_pool_handle, + vdr_pool_handle, sdk_node_theta_added): """ Test request for change node bls key with incorrect @@ -82,7 +82,7 @@ def test_fail_node_bls_key_validation(looper, # change key_proof key_proof = 'AAAAA' + key_proof[5:] with pytest.raises(RequestNackedException) as e: - sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle, + vdr_send_update_node(looper, new_steward_wallet, vdr_pool_handle, node_dest, new_node.name, None, None, None, None, diff --git a/plenum/test/pool_transactions/test_nodes_ha_change_back.py b/plenum/test/pool_transactions/test_nodes_ha_change_back.py index 8f6c2964b2..98af50218e 100644 --- a/plenum/test/pool_transactions/test_nodes_ha_change_back.py +++ b/plenum/test/pool_transactions/test_nodes_ha_change_back.py @@ -1,6 +1,6 @@ from plenum.common.util import hexToFriendly -from plenum.test.pool_transactions.helper import sdk_send_update_node +from plenum.test.pool_transactions.helper import vdr_send_update_node from plenum.test.test_node import TestNode, checkNodesConnected from stp_core.network.port_dispenser import genHa from plenum.common.config_helper import PNodeConfigHelper @@ -9,7 +9,7 @@ def testChangeNodeHaBack(looper, txnPoolNodeSet, - sdk_pool_handle, + vdr_pool_handle, sdk_node_theta_added, tconf, tdir): """ @@ -29,20 +29,20 @@ def testChangeNodeHaBack(looper, txnPoolNodeSet, correct_node_ha = genHa(1) node_dest = hexToFriendly(new_node.nodestack.verhex) - sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle, + vdr_send_update_node(looper, new_steward_wallet, vdr_pool_handle, node_dest, new_node.name, correct_node_ha.host, correct_node_ha.port, client_ha.host, client_ha.port) # step 2: set 'wrong' HA wrong_node_ha = genHa(1) - sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle, + vdr_send_update_node(looper, new_steward_wallet, vdr_pool_handle, 
node_dest, new_node.name, wrong_node_ha.host, wrong_node_ha.port, client_ha.host, client_ha.port) # step 3: set 'correct' HA back - sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle, + vdr_send_update_node(looper, new_steward_wallet, vdr_pool_handle, node_dest, new_node.name, correct_node_ha.host, correct_node_ha.port, client_ha.host, client_ha.port) diff --git a/plenum/test/pool_transactions/test_nodes_with_pool_txns.py b/plenum/test/pool_transactions/test_nodes_with_pool_txns.py index 50ce009bba..9bd6ba4d74 100644 --- a/plenum/test/pool_transactions/test_nodes_with_pool_txns.py +++ b/plenum/test/pool_transactions/test_nodes_with_pool_txns.py @@ -9,12 +9,12 @@ NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT, STEWARD_STRING from plenum.common.util import getMaxFailures, randomString from plenum.test import waits -from plenum.test.helper import sdk_send_random_and_check, \ - sdk_get_and_check_replies -from plenum.test.node_request.helper import sdk_ensure_pool_functional -from plenum.test.pool_transactions.helper import sdk_add_new_node, \ - sdk_add_2_nodes, sdk_pool_refresh, sdk_add_new_nym, prepare_new_node_data, \ - prepare_node_request, sdk_sign_and_send_prepared_request +from plenum.test.helper import vdr_send_random_and_check, \ + vdr_get_and_check_replies +from plenum.test.node_request.helper import vdr_ensure_pool_functional +from plenum.test.pool_transactions.helper import vdr_add_new_node, \ + vdr_add_2_nodes, vdr_pool_refresh, vdr_add_new_nym, prepare_new_node_data, \ + vdr_prepare_node_request, vdr_sign_and_send_prepared_request from plenum.test.test_node import checkProtocolInstanceSetup from stp_core.common.log import getlogger from stp_core.loop.eventually import eventually @@ -28,44 +28,44 @@ def testStewardCannotAddMoreThanOneNode(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, tdir, tconf, + vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath): new_node_name = "Epsilon" with pytest.raises(RequestRejectedException) as e: - sdk_add_new_node(looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_add_new_node(looper, + vdr_pool_handle, + vdr_wallet_steward, new_node_name, tdir, tconf, allPluginsPath) assert 'already has a node' in e._excinfo[1].args[0] - sdk_pool_refresh(looper, sdk_pool_handle) + vdr_pool_refresh(looper, vdr_pool_handle) def testClientConnectsToNewNode(looper, - sdk_pool_handle, + vdr_pool_handle, txnPoolNodeSet, sdk_node_theta_added, - sdk_wallet_client): + vdr_wallet_client): """ A client should be able to connect to a newly added node """ _, new_node = sdk_node_theta_added logger.debug("{} connected to the pool".format(new_node)) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) def testAdd2NewNodes(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, + vdr_pool_handle, vdr_wallet_steward, tdir, tconf, allPluginsPath): """ Add 2 new nodes to trigger replica addition and primary election """ - new_nodes = sdk_add_2_nodes(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, + new_nodes = vdr_add_2_nodes(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, tdir, tconf, allPluginsPath) for n in new_nodes: logger.debug("{} connected to the pool".format(n)) @@ -80,13 +80,13 @@ def checkFValue(): timeout = waits.expectedClientToPoolConnectionTimeout(len(txnPoolNodeSet)) looper.run(eventually(checkFValue, retryWait=1, timeout=timeout)) 
checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1) - sdk_pool_refresh(looper, sdk_pool_handle) + vdr_pool_refresh(looper, vdr_pool_handle) def testStewardCannotAddNodeWithOutFullFieldsSet(looper, tdir, tconf, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_steward): """ The case: Steward accidentally sends the NODE txn without full fields set. @@ -95,9 +95,9 @@ def testStewardCannotAddNodeWithOutFullFieldsSet(looper, tdir, tconf, """ new_node_name = "Epsilon" - new_steward_wallet_handle = sdk_add_new_nym(looper, - sdk_pool_handle, - sdk_wallet_steward, + new_steward_wallet_handle = vdr_add_new_nym(looper, + vdr_pool_handle, + vdr_wallet_steward, alias='New steward' + randomString( 3), role=STEWARD_STRING) @@ -105,7 +105,7 @@ def testStewardCannotAddNodeWithOutFullFieldsSet(looper, tdir, tconf, prepare_new_node_data(tconf, tdir, new_node_name) _, steward_did = new_steward_wallet_handle node_request = looper.loop.run_until_complete( - prepare_node_request(steward_did, + vdr_prepare_node_request(steward_did, new_node_name=new_node_name, clientIp=clientIp, clientPort=clientPort, @@ -122,26 +122,26 @@ def testStewardCannotAddNodeWithOutFullFieldsSet(looper, tdir, tconf, del request_json['operation'][DATA][NODE_PORT] node_request1 = json.dumps(request_json) - request_couple = sdk_sign_and_send_prepared_request(looper, + request_couple = vdr_sign_and_send_prepared_request(looper, new_steward_wallet_handle, - sdk_pool_handle, + vdr_pool_handle, node_request1) with pytest.raises(RequestNackedException) as e: - sdk_get_and_check_replies(looper, [request_couple]) + vdr_get_and_check_replies(looper, [request_couple]) assert 'missed fields - node_port' in e._excinfo[1].args[0] for fn in (NODE_IP, CLIENT_IP, NODE_PORT, CLIENT_PORT): request_json = json.loads(node_request) del request_json['operation'][DATA][fn] node_request2 = json.dumps(request_json) - request_couple = sdk_sign_and_send_prepared_request(looper, + request_couple = vdr_sign_and_send_prepared_request(looper, new_steward_wallet_handle, - sdk_pool_handle, + vdr_pool_handle, node_request2) # wait NAcks with exact message. 
it does not works for just 'is missed' # because the 'is missed' will check only first few cases with pytest.raises(RequestNackedException) as e: - sdk_get_and_check_replies(looper, [request_couple]) + vdr_get_and_check_replies(looper, [request_couple]) assert 'missed fields' in e._excinfo[1].args[0] @@ -149,7 +149,7 @@ def testNodesConnect(txnPoolNodeSet): pass -def testNodesReceiveClientMsgs(looper, txnPoolNodeSet, sdk_wallet_client, - sdk_pool_handle): - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, - sdk_pool_handle) +def testNodesReceiveClientMsgs(looper, txnPoolNodeSet, vdr_wallet_client, + vdr_pool_handle): + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, + vdr_pool_handle) diff --git a/plenum/test/pool_transactions/test_start_many_nodes.py b/plenum/test/pool_transactions/test_start_many_nodes.py index 578bfa2be4..3f4454e00c 100644 --- a/plenum/test/pool_transactions/test_start_many_nodes.py +++ b/plenum/test/pool_transactions/test_start_many_nodes.py @@ -1,21 +1,21 @@ # Start with 8 nodes from plenum.common.txn_util import get_type, get_payload_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.common.constants import NYM, ROLE, STEWARD from plenum.test.helper import assertEquality -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional nodeCount = 8 def test_genesis_nodes(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): assert len(txnPoolNodeSet) == nodeCount for node in txnPoolNodeSet: assertEquality(node.poolLedger.size, nodeCount) stw_count = sum(1 for _, txn in node.domainLedger.getAllTxn() if (get_type(txn) == NYM) and (get_payload_data(txn).get(ROLE) == STEWARD)) assertEquality(stw_count, nodeCount) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/pool_transactions/test_suspend_node.py b/plenum/test/pool_transactions/test_suspend_node.py index 0b40813af3..285df46ec8 100644 --- a/plenum/test/pool_transactions/test_suspend_node.py +++ b/plenum/test/pool_transactions/test_suspend_node.py @@ -9,13 +9,13 @@ from plenum.test.helper import sendMessageAndCheckDelivery -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.view_change.helper import start_stopped_node from stp_core.loop.eventually import eventually from plenum.server.node import Node from plenum.test.pool_transactions.helper import demote_node, \ - promote_node, sdk_pool_refresh, sdk_send_update_node + promote_node, vdr_pool_refresh, vdr_send_update_node from plenum.test.test_node import checkNodesConnected @@ -30,14 +30,14 @@ def checkNodeNotInNodeReg(node, nodeName): def test_steward_suspends_node_and_promote_with_new_ha( looper, txnPoolNodeSet, tdir, tconf, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, sdk_node_theta_added, poolTxnStewardData, allPluginsPath): new_steward_wallet, new_node = sdk_node_theta_added looper.run(checkNodesConnected(txnPoolNodeSet + [new_node])) - demote_node(looper, new_steward_wallet, sdk_pool_handle, new_node) + demote_node(looper, new_steward_wallet, vdr_pool_handle, new_node) # Check suspended node 
does not exist in any nodeReg or remotes of # nodes or clients @@ -46,7 +46,7 @@ def test_steward_suspends_node_and_promote_with_new_ha( looper.run(eventually(checkNodeNotInNodeReg, node, new_node.name)) # Check that a node does not connect to the suspended # node - sdk_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet, vdr_pool_handle) with pytest.raises(RemoteNotFound): looper.loop.run_until_complete(sendMessageAndCheckDelivery(txnPoolNodeSet[0], new_node)) @@ -57,8 +57,8 @@ def test_steward_suspends_node_and_promote_with_new_ha( # nodes and clients can also connect to that node node_ha, client_ha = genHa(2) node_nym = hexToFriendly(new_node.nodestack.verhex) - sdk_send_update_node(looper, new_steward_wallet, - sdk_pool_handle, node_nym, new_node.name, + vdr_send_update_node(looper, new_steward_wallet, + vdr_pool_handle, node_nym, new_node.name, node_ha.host, node_ha.port, client_ha.host, client_ha.port, services=[VALIDATOR]) @@ -70,5 +70,5 @@ def test_steward_suspends_node_and_promote_with_new_ha( assert all(node.nodestack.remotes[new_node.name].ha == node_ha for node in txnPoolNodeSet) txnPoolNodeSet.append(nodeTheta) looper.run(checkNodesConnected(txnPoolNodeSet)) - sdk_pool_refresh(looper, sdk_pool_handle) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_pool_refresh(looper, vdr_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) diff --git a/plenum/test/pool_transactions/test_txn_pool_manager.py b/plenum/test/pool_transactions/test_txn_pool_manager.py index 827ac57f19..7a85472a75 100644 --- a/plenum/test/pool_transactions/test_txn_pool_manager.py +++ b/plenum/test/pool_transactions/test_txn_pool_manager.py @@ -4,7 +4,7 @@ from plenum.test.test_node import TestNode from stp_core.loop.eventually import eventually -from plenum.test.helper import sdk_send_random_and_check, assertExp +from plenum.test.helper import vdr_send_random_and_check, assertExp from plenum.common.txn_util import get_type, get_payload_data @@ -17,14 +17,14 @@ def test_twice_demoted_node_dont_write_txns(txnPoolNodeSet, - looper, sdk_wallet_stewards, sdk_pool_handle): + looper, vdr_wallet_stewards, vdr_pool_handle): request_count = 5 demoted_node = txnPoolNodeSet[2] alive_pool = list(txnPoolNodeSet) alive_pool.remove(demoted_node) - demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, demoted_node) - demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, demoted_node) + demote_node(looper, vdr_wallet_stewards[2], vdr_pool_handle, demoted_node) + demote_node(looper, vdr_wallet_stewards[2], vdr_pool_handle, demoted_node) demoted_nym = None for _, txn in txnPoolNodeSet[0].poolManager.ledger.getAllTxn(): @@ -37,8 +37,8 @@ def test_twice_demoted_node_dont_write_txns(txnPoolNodeSet, assert all(node.write_manager.get_node_data(demoted_nym)[SERVICES] == [] for node in alive_pool) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_stewards[0], request_count) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_stewards[0], request_count) looper.run( eventually( @@ -60,8 +60,8 @@ def test_get_nym_by_name_not_in_registry(txnPoolNodeSet, pool_node_txns): def test_get_nym_by_name_demoted(txnPoolNodeSet, pool_node_txns, - looper, sdk_wallet_stewards, sdk_pool_handle): - demote_node(looper, sdk_wallet_stewards[0], sdk_pool_handle, + looper, 
vdr_wallet_stewards, vdr_pool_handle): + demote_node(looper, vdr_wallet_stewards[0], vdr_pool_handle, txnPoolNodeSet[0]) check_get_nym_by_name(txnPoolNodeSet, pool_node_txns) diff --git a/plenum/test/pool_transactions/test_z_node_key_changed.py b/plenum/test/pool_transactions/test_z_node_key_changed.py index 4ff5cfb3dc..78ddcc497d 100644 --- a/plenum/test/pool_transactions/test_z_node_key_changed.py +++ b/plenum/test/pool_transactions/test_z_node_key_changed.py @@ -2,7 +2,7 @@ import base58 import types -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.common import stack_manager from plenum.common.keygen_utils import initNodeKeysForBothStacks, \ @@ -10,7 +10,7 @@ from plenum.common.signer_simple import SimpleSigner from plenum.common.util import randomString from plenum.test.node_catchup.helper import waitNodeDataEquality -from plenum.test.pool_transactions.helper import sdk_change_node_keys +from plenum.test.pool_transactions.helper import vdr_change_node_keys from plenum.test.test_node import TestNode, checkNodesConnected from plenum.common.config_helper import PNodeConfigHelper from stp_core.common.log import getlogger @@ -26,7 +26,7 @@ def testNodeKeysChanged(looper, txnPoolNodeSet, tdir, tconf, sdk_node_theta_added, - sdk_pool_handle, + vdr_pool_handle, allPluginsPath=None): # 1. Add new node orig_view_no = txnPoolNodeSet[0].viewNo @@ -38,7 +38,7 @@ def testNodeKeysChanged(looper, txnPoolNodeSet, tdir, nodeHa, nodeCHa = HA(*new_node.nodestack.ha), HA(*new_node.clientstack.ha) sigseed = randomString(32).encode() verkey = base58.b58encode(SimpleSigner(seed=sigseed).naclSigner.verraw).decode("utf-8") - sdk_change_node_keys(looper, new_node, new_steward_wallet, sdk_pool_handle, verkey) + vdr_change_node_keys(looper, new_node, new_steward_wallet, vdr_pool_handle, verkey) # 3. Start the new node back with the new keys logger.debug("{} starting with HAs {} {}".format(new_node, nodeHa, nodeCHa)) @@ -56,14 +56,14 @@ def testNodeKeysChanged(looper, txnPoolNodeSet, tdir, looper.run(checkNodesConnected(txnPoolNodeSet)) waitNodeDataEquality(looper, node, *txnPoolNodeSet[:-1], exclude_from_check=['check_last_ordered_3pc_backup']) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet, vdr_pool_handle) # 5. 
Make sure that no additional view changes happened assert all(n.viewNo == orig_view_no for n in txnPoolNodeSet) def test_node_init_remote_keys_errors_not_suppressed(looper, txnPoolNodeSet, sdk_node_theta_added, monkeypatch, - sdk_pool_handle): + vdr_pool_handle): TEST_EXCEPTION_MESSAGE = 'Failed to create some cert files' new_steward_wallet, new_node = sdk_node_theta_added @@ -98,6 +98,6 @@ def stackKeysChanged(self, *args, **kwargs): monkeypatch.setattr(stack_manager, 'initRemoteKeys', initRemoteKeysMock) - sdk_change_node_keys(looper, new_node, new_steward_wallet, sdk_pool_handle, verkey) + vdr_change_node_keys(looper, new_node, new_steward_wallet, vdr_pool_handle, verkey) monkeypatch.undo() diff --git a/plenum/test/pp_seq_no_restoration/test_backup_primary_restores_pp_seq_no_if_view_is_same.py b/plenum/test/pp_seq_no_restoration/test_backup_primary_restores_pp_seq_no_if_view_is_same.py index 350c8a493d..2f37d1ccb4 100644 --- a/plenum/test/pp_seq_no_restoration/test_backup_primary_restores_pp_seq_no_if_view_is_same.py +++ b/plenum/test/pp_seq_no_restoration/test_backup_primary_restores_pp_seq_no_if_view_is_same.py @@ -2,7 +2,7 @@ from plenum.common.constants import LAST_SENT_PRE_PREPARE from plenum.test import waits from plenum.test.checkpoints.conftest import chkFreqPatched -from plenum.test.helper import sdk_send_batches_of_random, assertExp +from plenum.test.helper import vdr_send_batches_of_random, assertExp from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected from plenum.test.test_node import ensureElectionsDone, getPrimaryReplica, \ @@ -21,15 +21,15 @@ def test_backup_primary_restores_pp_seq_no_if_view_is_same( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, tconf, tdir, allPluginsPath, chkFreqPatched, view_no): # Get a node with a backup primary replica replica = getPrimaryReplica(txnPoolNodeSet, instId=backup_inst_id) batches_count = 0 if view_no == 0 else 1 node = replica.node # Send some 3PC-batches and wait until the replica orders the 3PC-batches - sdk_send_batches_of_random(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, num_reqs=7, num_batches=num_batches, timeout=tconf.Max3PCBatchWait) batches_count += num_batches @@ -82,8 +82,8 @@ def test_backup_primary_restores_pp_seq_no_if_view_is_same( assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB # Send a 3PC-batch and ensure that the replica orders it - sdk_send_batches_of_random(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, num_reqs=1, num_batches=1, timeout=tconf.Max3PCBatchWait) batches_count += 1 diff --git a/plenum/test/pp_seq_no_restoration/test_node_erases_last_sent_pp_key_on_pool_restart.py b/plenum/test/pp_seq_no_restoration/test_node_erases_last_sent_pp_key_on_pool_restart.py index 822c995af6..26192bc814 100644 --- a/plenum/test/pp_seq_no_restoration/test_node_erases_last_sent_pp_key_on_pool_restart.py +++ b/plenum/test/pp_seq_no_restoration/test_node_erases_last_sent_pp_key_on_pool_restart.py @@ -3,7 +3,7 @@ from plenum.common.constants import LAST_SENT_PRE_PREPARE from plenum.test import waits from plenum.test.checkpoints.conftest import chkFreqPatched -from plenum.test.helper import sdk_send_batches_of_random, assertExp +from plenum.test.helper import vdr_send_batches_of_random, assertExp from 
plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected from plenum.test.test_node import ensureElectionsDone, getPrimaryReplica, \ @@ -20,15 +20,15 @@ def test_node_not_erases_last_sent_pp_key_on_pool_restart( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, tconf, tdir, allPluginsPath, chkFreqPatched): # Get a node with a backup primary replica and the rest of the nodes replica = getPrimaryReplica(txnPoolNodeSet, instId=backup_inst_id) node = replica.node # Send some 3PC-batches and wait until the replica orders the 3PC-batches - sdk_send_batches_of_random(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, num_reqs=7, num_batches=7, timeout=tconf.Max3PCBatchWait) @@ -85,8 +85,8 @@ def test_node_not_erases_last_sent_pp_key_on_pool_restart( assert replica.H == replica.last_ordered_3pc[1] + LOG_SIZE # Send a 3PC-batch and ensure that the replica orders it - sdk_send_batches_of_random(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, num_reqs=1, num_batches=1, timeout=tconf.Max3PCBatchWait) diff --git a/plenum/test/pp_seq_no_restoration/test_node_erases_last_sent_pp_key_on_view_change.py b/plenum/test/pp_seq_no_restoration/test_node_erases_last_sent_pp_key_on_view_change.py index 9992e6d461..cf1e4bf6e7 100644 --- a/plenum/test/pp_seq_no_restoration/test_node_erases_last_sent_pp_key_on_view_change.py +++ b/plenum/test/pp_seq_no_restoration/test_node_erases_last_sent_pp_key_on_view_change.py @@ -2,7 +2,7 @@ from plenum.common.constants import LAST_SENT_PRE_PREPARE from plenum.test import waits -from plenum.test.helper import sdk_send_batches_of_random, assertExp +from plenum.test.helper import vdr_send_batches_of_random, assertExp from plenum.test.test_node import ensureElectionsDone, getPrimaryReplica from plenum.test.view_change.helper import ensure_view_change from stp_core.loop.eventually import eventually @@ -15,14 +15,14 @@ def test_node_erases_last_sent_pp_key_on_view_change( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, tconf): # Get a node with a backup primary replica replica = getPrimaryReplica(txnPoolNodeSet, instId=backup_inst_id) node = replica.node # Send some 3PC-batches and wait until the replica orders the 3PC-batches - sdk_send_batches_of_random(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, num_reqs=3, num_batches=num_batches_before, timeout=tconf.Max3PCBatchWait) @@ -44,8 +44,8 @@ def test_node_erases_last_sent_pp_key_on_view_change( assert value == [node.viewNo, 1] # Send a 3PC-batch and ensure that the replica orders it - sdk_send_batches_of_random(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_send_batches_of_random(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, num_reqs=1, num_batches=num_batches_after, timeout=tconf.Max3PCBatchWait) diff --git a/plenum/test/primary_selection/test_add_node_to_pool_with_large_ppseqno.py b/plenum/test/primary_selection/test_add_node_to_pool_with_large_ppseqno.py index d9316151df..7c009fad2d 100644 --- a/plenum/test/primary_selection/test_add_node_to_pool_with_large_ppseqno.py +++ 
b/plenum/test/primary_selection/test_add_node_to_pool_with_large_ppseqno.py @@ -3,9 +3,9 @@ from plenum.common.messages.node_messages import Checkpoint from plenum.common.util import randomString from plenum.test.helper import get_pp_seq_no -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.view_change.helper import ensure_several_view_change -from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node +from plenum.test.pool_transactions.helper import vdr_add_new_steward_and_node from plenum.test.test_node import checkNodesConnected from plenum.test.node_catchup.helper import waitNodeDataEquality @@ -22,8 +22,8 @@ def _set_ppseqno(nodes, new_ppsn): @pytest.mark.parametrize('do_view_change', [0, 1]) -def test_add_node_to_pool_with_large_ppseqno_diff_views(do_view_change, looper, txnPoolNodeSet, tconf, sdk_pool_handle, - sdk_wallet_steward, tdir, allPluginsPath): +def test_add_node_to_pool_with_large_ppseqno_diff_views(do_view_change, looper, txnPoolNodeSet, tconf, vdr_pool_handle, + vdr_wallet_steward, tdir, allPluginsPath): """ Adding a node to the pool while ppSeqNo is big caused a node to stash all the requests because of incorrect watermarks limits set. @@ -39,17 +39,17 @@ def test_add_node_to_pool_with_large_ppseqno_diff_views(do_view_change, looper, assert (big_ppseqno > cur_ppseqno) # ensure pool is working properly - sdk_ensure_pool_functional(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, + vdr_wallet_steward, + vdr_pool_handle) assert (cur_ppseqno < get_pp_seq_no(txnPoolNodeSet)) _set_ppseqno(txnPoolNodeSet, big_ppseqno) cur_ppseqno = get_pp_seq_no(txnPoolNodeSet) assert (big_ppseqno == cur_ppseqno) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, + vdr_wallet_steward, + vdr_pool_handle) assert (cur_ppseqno < get_pp_seq_no(txnPoolNodeSet)) @@ -61,21 +61,21 @@ def test_add_node_to_pool_with_large_ppseqno_diff_views(do_view_change, looper, new_steward_name = "testClientSteward" + randomString(4) new_node_name = "TestTheta" + randomString(4) - new_steward_wallet_handle, new_node = sdk_add_new_steward_and_node( - looper, sdk_pool_handle, sdk_wallet_steward, + new_steward_wallet_handle, new_node = vdr_add_new_steward_and_node( + looper, vdr_pool_handle, vdr_wallet_steward, new_steward_name, new_node_name, tdir, tconf, allPluginsPath=allPluginsPath) txnPoolNodeSet.append(new_node) looper.run(checkNodesConnected(txnPoolNodeSet)) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, + vdr_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet_handle, - sdk_pool_handle) + vdr_pool_handle) waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1]) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, + vdr_wallet_steward, + vdr_pool_handle) waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1]) diff --git a/plenum/test/primary_selection/test_add_node_with_f_changed.py b/plenum/test/primary_selection/test_add_node_with_f_changed.py index e02f114767..a0461642a0 100644 --- a/plenum/test/primary_selection/test_add_node_with_f_changed.py +++ b/plenum/test/primary_selection/test_add_node_with_f_changed.py @@ -3,7 +3,7 @@ from plenum.test.node_catchup.helper import waitNodeDataEquality from 
plenum.common.util import randomString from plenum.test.test_node import checkNodesConnected -from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node +from plenum.test.pool_transactions.helper import vdr_add_new_steward_and_node from plenum.test import waits logger = getlogger() @@ -19,7 +19,7 @@ def add_new_node(looper, nodes, sdk_pool_handle, sdk_wallet_steward, node_name = name or randomString(5) new_steward_name = "testClientSteward" + randomString(3) new_steward_wallet_handle, new_node = \ - sdk_add_new_steward_and_node(looper, + vdr_add_new_steward_and_node(looper, sdk_pool_handle, sdk_wallet_steward, new_steward_name, @@ -36,37 +36,37 @@ def add_new_node(looper, nodes, sdk_pool_handle, sdk_wallet_steward, def test_add_node_with_f_changed(looper, txnPoolNodeSet, tdir, tconf, - allPluginsPath, sdk_pool_handle, - sdk_wallet_steward, limitTestRunningTime): + allPluginsPath, vdr_pool_handle, + vdr_wallet_steward, limitTestRunningTime): nodes = txnPoolNodeSet add_new_node(looper, nodes, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath, name="Node5") add_new_node(looper, nodes, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath, name="Node6") add_new_node(looper, nodes, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath, name="Node7") add_new_node(looper, nodes, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath, diff --git a/plenum/test/primary_selection/test_catchup_after_view_change.py b/plenum/test/primary_selection/test_catchup_after_view_change.py index e739e3cd6d..b6a6cfbb8e 100644 --- a/plenum/test/primary_selection/test_catchup_after_view_change.py +++ b/plenum/test/primary_selection/test_catchup_after_view_change.py @@ -1,6 +1,6 @@ import pytest -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data, \ waitNodeDataInequality from plenum.test.delayers import cr_delay, ppDelay, pDelay, \ @@ -26,7 +26,7 @@ def slow_node(request, txnPoolNodeSet): @pytest.mark.skip(reasone="It's an intermittent test, INDY-722") def test_slow_nodes_catchup_before_selecting_primary_in_new_view( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, tconf, slow_node): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, tconf, slow_node): """ Delay 3PC to 1 node and then cause view change so by the time the view change happens(each node gets >n-f `INSTANCE_CHANGE`s), the slow node is @@ -42,9 +42,9 @@ def test_slow_nodes_catchup_before_selecting_primary_in_new_view( slow_node.nodeIbStasher.delay(pDelay(2 * delay, 0)) slow_node.nodeIbStasher.delay(cDelay(3 * delay, 0)) for i in range(2): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, 20) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_steward, 20) waitNodeDataInequality(looper, slow_node, *fast_nodes) catchup_reply_counts = {n.name: n.ledgerManager.spylog.count( @@ -100,7 +100,7 @@ def slow_node_processed_some(): slow_node.reset_delays_and_process_delayeds() # Make sure pool is functional - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + 
vdr_wallet_steward, 5) ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) diff --git a/plenum/test/primary_selection/test_catchup_multiple_rounds.py b/plenum/test/primary_selection/test_catchup_multiple_rounds.py index c4a05a05bf..2b33ab9bf6 100644 --- a/plenum/test/primary_selection/test_catchup_multiple_rounds.py +++ b/plenum/test/primary_selection/test_catchup_multiple_rounds.py @@ -2,8 +2,8 @@ from plenum.common.constants import DOMAIN_LEDGER_ID from plenum.test.delayers import delay_3pc_messages, icDelay -from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check, \ - sdk_send_random_requests, sdk_get_and_check_replies +from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check, \ + vdr_send_random_requests, vdr_get_and_check_replies from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.batching_3pc.conftest import tconf @@ -21,8 +21,8 @@ def test_slow_nodes_catchup_before_selecting_primary_in_new_view( tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, one_node_added): """ Delay 3PC messages to one node and view change messages to some others @@ -40,8 +40,8 @@ def test_slow_nodes_catchup_before_selecting_primary_in_new_view( delay_3pc = 100 delay_ic = 5 - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 2 * Max3PCBatchSize) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 2 * Max3PCBatchSize) delay_3pc_messages([slow_node], 0, delay_3pc) @@ -53,13 +53,13 @@ def start_count(): return sum([1 for e in slow_node.ledgerManager.spylog.getAll( if e.params['ledgerId'] == DOMAIN_LEDGER_ID]) s = start_count() - requests = sdk_send_random_requests(looper, sdk_pool_handle, - sdk_wallet_client, 10 * Max3PCBatchSize) + requests = vdr_send_random_requests(looper, vdr_pool_handle, + vdr_wallet_client, 10 * Max3PCBatchSize) ensure_view_change(looper, nodes=txnPoolNodeSet, exclude_from_check=nodes_slow_to_inst_chg) - sdk_get_and_check_replies(looper, requests) + vdr_get_and_check_replies(looper, requests) waitNodeDataEquality(looper, slow_node, *txnPoolNodeSet[:-1]) @@ -69,7 +69,7 @@ def start_count(): return sum([1 for e in slow_node.ledgerManager.spylog.getAll( looper.run(eventually(checkViewNoForNodes, slow_node.viewNo)) checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 2 * Max3PCBatchSize) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 2 * Max3PCBatchSize) waitNodeDataEquality(looper, new_node, *nodes_slow_to_inst_chg) diff --git a/plenum/test/primary_selection/test_new_node_accepts_chosen_primary.py b/plenum/test/primary_selection/test_new_node_accepts_chosen_primary.py index 5c7d3e1f61..80b2fdee96 100644 --- a/plenum/test/primary_selection/test_new_node_accepts_chosen_primary.py +++ b/plenum/test/primary_selection/test_new_node_accepts_chosen_primary.py @@ -2,7 +2,7 @@ from stp_core.common.log import getlogger -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.test_node import TestNode from plenum.test.view_change.helper import ensure_view_change_complete @@ -25,7 +25,7 @@ def testNodeClass(patchPluginManager): @pytest.fixture(scope="module") -def txnPoolNodeSet(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_steward, +def txnPoolNodeSet(txnPoolNodeSet, 
looper, vdr_pool_handle, vdr_wallet_steward, tconf, tdirWithPoolTxns, allPluginsPath): logger.debug("Do several view changes to round the list of primaries") @@ -35,8 +35,8 @@ def txnPoolNodeSet(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_steward, ensure_view_change_complete(looper, txnPoolNodeSet) logger.debug("Send requests to ensure that pool is working properly, " "viewNo: {}".format(txnPoolNodeSet[0].viewNo)) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 3) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 3) logger.debug("Pool is ready, current viewNo: {}".format(txnPoolNodeSet[0].viewNo)) @@ -61,7 +61,7 @@ def test_new_node_accepts_chosen_primary( logger.debug("Send requests to ensure that pool is working properly, " "viewNo: {}".format(txnPoolNodeSet[0].viewNo)) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, new_steward_wallet_handle, 3) logger.debug("Ensure nodes data equality".format(txnPoolNodeSet[0].viewNo)) diff --git a/plenum/test/primary_selection/test_primary_selection.py b/plenum/test/primary_selection/test_primary_selection.py index db145e7eca..217a56a318 100644 --- a/plenum/test/primary_selection/test_primary_selection.py +++ b/plenum/test/primary_selection/test_primary_selection.py @@ -2,7 +2,7 @@ import pytest -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.primary_selection.helper import \ check_rank_consistent_across_each_node from plenum.test.view_change.helper import ensure_view_change @@ -23,7 +23,7 @@ def primaryReplicas(txnPoolNodeSet): # noinspection PyIncorrectDocstring def testPrimarySelectionAfterPoolReady( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward): """ Once the pool is ready(node has connected to at least 3 other nodes), appropriate primary replicas should be selected.
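Note (not part of this patch): the vdr_* helpers referenced throughout these hunks are not defined here; they are assumed to be thin wrappers over the indy-vdr Python binding, replacing the old libindy-based sdk_* helpers while keeping the same call shape. A minimal, hypothetical sketch of one such wrapper — the module path, helper signature, and indy_vdr calls below are assumptions, not contents of this change:

# Hypothetical sketch only -- not part of this patch.
# Assumes the `indy_vdr` Python wrapper and mirrors the old sdk_build_get_txn_request call shape.
from indy_vdr import ledger

def vdr_build_get_txn_request(looper, submitter_did, seq_no, ledger_type=1):
    # indy-vdr builds the GET_TXN request locally, so no wallet or pool handle is needed here;
    # `looper` is kept only to preserve the old helper's signature for the callers above.
    request = ledger.build_get_txn_request(submitter_did, ledger_type, seq_no)
    # Return the request body as a JSON string, since the tests above json.loads() the result.
    return request.body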
@@ -66,8 +66,8 @@ def checkPrimaryPlacement(): # Check if every protocol instance has one and only one primary and any node # has no more than one primary checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 5) @pytest.fixture(scope='module') diff --git a/plenum/test/primary_selection/test_primary_selection_after_demoted_node_promotion.py b/plenum/test/primary_selection/test_primary_selection_after_demoted_node_promotion.py index 2d7f4dd38b..da2acb3319 100644 --- a/plenum/test/primary_selection/test_primary_selection_after_demoted_node_promotion.py +++ b/plenum/test/primary_selection/test_primary_selection_after_demoted_node_promotion.py @@ -6,10 +6,10 @@ from plenum.common.constants import VALIDATOR -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.pool_transactions.helper import \ - disconnect_node_and_ensure_disconnected, sdk_send_update_node + disconnect_node_and_ensure_disconnected, vdr_send_update_node from plenum.test.pool_transactions.conftest import sdk_node_theta_added from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data @@ -30,7 +30,7 @@ def check_all_nodes_the_same_pool_list(nodes): @pytest.mark.skip("Too many sdk_pool_refresh") def test_primary_selection_after_demoted_node_promotion( looper, txnPoolNodeSet, sdk_node_theta_added, - sdk_pool_handle, + vdr_pool_handle, tconf, tdir, allPluginsPath): """ Demote non-primary node @@ -53,27 +53,27 @@ def test_primary_selection_after_demoted_node_promotion( logger.info("1. Demote node Theta") node_dest = hexToFriendly(new_node.nodestack.verhex) - sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle, + vdr_send_update_node(looper, new_steward_wallet, vdr_pool_handle, node_dest, new_node.name, None, None, None, None, []) remainingNodes = list(set(txnPoolNodeSet) - {new_node}) check_all_nodes_the_same_pool_list(remainingNodes) # ensure pool is working properly - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, new_steward_wallet, 3) # TODO view change might happen unexpectedly by unknown reason # checkViewNoForNodes(remainingNodes, expectedViewNo=viewNo0) logger.info("2. 
Promote node Theta back") - sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle, + vdr_send_update_node(looper, new_steward_wallet, vdr_pool_handle, node_dest, new_node.name, None, None, None, None, [VALIDATOR]) check_all_nodes_the_same_pool_list(txnPoolNodeSet) # ensure pool is working properly - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, new_steward_wallet, 3) # checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=viewNo0) @@ -86,7 +86,7 @@ def test_primary_selection_after_demoted_node_promotion( remainingNodes = list(set(txnPoolNodeSet) - {stopped_node}) ensureElectionsDone(looper, remainingNodes) # ensure pool is working properly - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, new_steward_wallet, 3) # checkViewNoForNodes(remainingNodes, expectedViewNo=viewNo0) @@ -96,7 +96,7 @@ def test_primary_selection_after_demoted_node_promotion( txnPoolNodeSet = remainingNodes + [restartedNode] ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) # ensure pool is working properly - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, new_steward_wallet, 3) # checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=viewNo0) @@ -105,5 +105,5 @@ def test_primary_selection_after_demoted_node_promotion( while txnPoolNodeSet[0].viewNo < 4: ensure_view_change_complete(looper, txnPoolNodeSet) # ensure pool is working properly - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, new_steward_wallet, 3) diff --git a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py index ffe7302247..ce59feb317 100644 --- a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py +++ b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py @@ -2,12 +2,12 @@ from stp_core.common.log import getlogger -from plenum.test.pool_transactions.helper import sdk_send_update_node +from plenum.test.pool_transactions.helper import vdr_send_update_node from plenum.test.test_node import TestNode, checkNodesConnected, \ ensureElectionsDone from plenum.test.helper import checkViewNoForNodes, \ - sdk_send_random_and_check + vdr_send_random_and_check from plenum.test.primary_selection.helper import getPrimaryNodesIdxs from plenum.common.config_helper import PNodeConfigHelper @@ -17,8 +17,8 @@ def test_primary_selection_after_primary_demotion_and_pool_restart(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, txnPoolMasterNodes, tdir, tconf): """ @@ -29,8 +29,8 @@ def test_primary_selection_after_primary_demotion_and_pool_restart(looper, logger.info("1. 
turn off the node which has primary replica for master instanse") master_node = txnPoolMasterNodes[0] node_dest = hexToFriendly(master_node.nodestack.verhex) - sdk_send_update_node(looper, sdk_wallet_steward, - sdk_pool_handle, + vdr_send_update_node(looper, vdr_wallet_steward, + vdr_pool_handle, node_dest, master_node.name, None, None, None, None, @@ -65,8 +65,8 @@ def test_primary_selection_after_primary_demotion_and_pool_restart(looper, looper.run(checkNodesConnected(restNodes)) ensureElectionsDone(looper, restNodes) checkViewNoForNodes(restNodes, 0) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 3) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 3) primariesIdxs = getPrimaryNodesIdxs(restNodes) assert restNodes[primariesIdxs[0]].name != master_node.name diff --git a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py index 2703eae639..9960db7098 100644 --- a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py +++ b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py @@ -2,9 +2,9 @@ from stp_core.common.log import getlogger -from plenum.test.pool_transactions.helper import sdk_send_update_node +from plenum.test.pool_transactions.helper import vdr_send_update_node -from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check +from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change.helper import ensure_view_change_complete @@ -12,8 +12,8 @@ def test_primary_selection_after_primary_demotion_and_view_changes(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, txnPoolMasterNodes): """ Demote primary and do multiple view changes forcing primaries rotation. @@ -26,8 +26,8 @@ def test_primary_selection_after_primary_demotion_and_view_changes(looper, txnPo " this should trigger view change") master_node = txnPoolMasterNodes[0] node_dest = hexToFriendly(master_node.nodestack.verhex) - sdk_send_update_node(looper, sdk_wallet_steward, - sdk_pool_handle, + vdr_send_update_node(looper, vdr_wallet_steward, + vdr_pool_handle, node_dest, master_node.name, None, None, None, None, @@ -45,8 +45,8 @@ def test_primary_selection_after_primary_demotion_and_view_changes(looper, txnPo assert restNodes[0].replicas[0].primaryName != master_node.name # ensure pool is working properly - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 3) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 3) logger.info("2. force view change 2 and check final viewNo") ensure_view_change_complete(looper, restNodes) @@ -55,8 +55,8 @@ def test_primary_selection_after_primary_demotion_and_view_changes(looper, txnPo assert restNodes[0].replicas[0].primaryName != master_node.name assert viewNo2 == viewNo1 + 1 - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 3) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 3) logger.info("3. 
force view change 3 and check final viewNo") ensure_view_change_complete(looper, restNodes) @@ -64,8 +64,8 @@ def test_primary_selection_after_primary_demotion_and_view_changes(looper, txnPo assert restNodes[0].replicas[0].primaryName != master_node.name assert viewNo3 == viewNo2 + 1 - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 3) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 3) logger.info("4. force view change 4 and check final viewNo") ensure_view_change_complete(looper, restNodes) @@ -73,5 +73,5 @@ def test_primary_selection_after_primary_demotion_and_view_changes(looper, txnPo assert restNodes[0].replicas[0].primaryName != master_node.name assert viewNo4 == viewNo3 + 1 - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 3) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 3) diff --git a/plenum/test/primary_selection/test_primary_selection_pool_txn.py b/plenum/test/primary_selection/test_primary_selection_pool_txn.py index 4e81191135..a901ef2fae 100644 --- a/plenum/test/primary_selection/test_primary_selection_pool_txn.py +++ b/plenum/test/primary_selection/test_primary_selection_pool_txn.py @@ -1,7 +1,7 @@ import pytest -from plenum.test.node_request.helper import sdk_ensure_pool_functional -from plenum.test.pool_transactions.helper import sdk_add_2_nodes +from plenum.test.node_request.helper import vdr_ensure_pool_functional +from plenum.test.pool_transactions.helper import vdr_add_2_nodes from plenum.test.primary_selection.helper import check_newly_added_nodes @@ -17,21 +17,21 @@ def test_primary_selection_non_genesis_node(sdk_one_node_added, looper, - txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward): - sdk_ensure_pool_functional(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle) + txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward): + vdr_ensure_pool_functional(looper, txnPoolNodeSet, + vdr_wallet_steward, + vdr_pool_handle) @pytest.fixture(scope='module') def two_more_nodes_added(sdk_one_node_added, looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, + vdr_pool_handle, vdr_wallet_steward, tdir, tconf, allPluginsPath): # check_accepted_view_change_sent(one_node_added, txnPoolNodeSet) - new_nodes = sdk_add_2_nodes(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, + new_nodes = vdr_add_2_nodes(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath) check_newly_added_nodes(looper, txnPoolNodeSet, new_nodes) @@ -45,11 +45,11 @@ def test_primary_selection_increase_f( two_more_nodes_added, looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle): + vdr_wallet_steward, + vdr_pool_handle): # for n in two_more_nodes_added: # check_accepted_view_change_sent(n, txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) # TODO: Add more tests to make one next primary crashed, malicious, ensure primary # selection happens after catchup diff --git a/plenum/test/primary_selection/test_promotion_before_view_change.py b/plenum/test/primary_selection/test_promotion_before_view_change.py index b7cc17fafc..364ee13c44 100644 --- a/plenum/test/primary_selection/test_promotion_before_view_change.py +++ b/plenum/test/primary_selection/test_promotion_before_view_change.py @@ -2,7 +2,7 @@ from 
plenum.test.node_catchup.test_config_ledger import start_stopped_node -from plenum.test.helper import sdk_send_random_and_check, checkViewNoForNodes, waitForViewChange +from plenum.test.helper import vdr_send_random_and_check, checkViewNoForNodes, waitForViewChange from plenum.test.pool_transactions.helper import demote_node, disconnect_node_and_ensure_disconnected, promote_node from plenum.test.test_node import ensureElectionsDone, checkNodesConnected @@ -14,10 +14,10 @@ def test_promotion_before_view_change(looper, tdir, tconf, allPluginsPath, - sdk_wallet_stewards, - sdk_pool_handle): + vdr_wallet_stewards, + vdr_pool_handle): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_stewards[0], 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_stewards[0], 1) assert txnPoolNodeSet[0].master_replica.isPrimary assert txnPoolNodeSet[1].replicas[1].isPrimary assert txnPoolNodeSet[2].replicas[2].isPrimary @@ -28,8 +28,8 @@ def test_promotion_before_view_change(looper, node_5 = txnPoolNodeSet[4] # Demote node 2 - steward_2 = sdk_wallet_stewards[1] - demote_node(looper, steward_2, sdk_pool_handle, node_2) + steward_2 = vdr_wallet_stewards[1] + demote_node(looper, steward_2, vdr_pool_handle, node_2) disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, node_2) looper.removeProdable(node_2) txnPoolNodeSet.remove(node_2) @@ -46,7 +46,7 @@ def test_promotion_before_view_change(looper, # Promoting node 3, increasing replica count node_2 = start_stopped_node(node_2, looper, tconf, tdir, allPluginsPath) - promote_node(looper, steward_2, sdk_pool_handle, node_2) + promote_node(looper, steward_2, vdr_pool_handle, node_2) txnPoolNodeSet.append(node_2) looper.run(checkNodesConnected(txnPoolNodeSet)) waitForViewChange(looper, @@ -56,5 +56,5 @@ def test_promotion_before_view_change(looper, # node 5 is a primary since promoted node is added at the end of the list assert node_5.master_replica.isPrimary - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_stewards[0], 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_stewards[0], 2) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/primary_selection/test_promotion_leads_to_correct_primary_selection.py b/plenum/test/primary_selection/test_promotion_leads_to_correct_primary_selection.py index 5d9b359e22..ead56add52 100644 --- a/plenum/test/primary_selection/test_promotion_leads_to_correct_primary_selection.py +++ b/plenum/test/primary_selection/test_promotion_leads_to_correct_primary_selection.py @@ -1,12 +1,11 @@ import json import pytest -from indy.did import create_and_store_my_did from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.node_catchup.test_config_ledger import start_stopped_node -from plenum.test.helper import sdk_send_random_and_check, checkViewNoForNodes, waitForViewChange +from plenum.test.helper import vdr_send_random_and_check, checkViewNoForNodes, waitForViewChange from plenum.test.pool_transactions.helper import demote_node, disconnect_node_and_ensure_disconnected, promote_node from plenum.test.test_node import ensureElectionsDone, checkNodesConnected @@ -18,14 +17,14 @@ def test_promotion_leads_to_correct_primary_selection(looper, tdir, tconf, allPluginsPath, - sdk_wallet_stewards, - sdk_pool_handle): + vdr_wallet_stewards, + vdr_pool_handle): # We are saving pool state at moment of last view_change to send it # to newly connected nodes so they 
could restore primaries basing on this node set. # When current primaries getting edited because of promotion/demotion we don't take this into account. # That lead us to primary inconsistency on different nodes - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_stewards[0], 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_stewards[0], 1) assert txnPoolNodeSet[0].master_replica.isPrimary assert txnPoolNodeSet[1].replicas[1].isPrimary assert txnPoolNodeSet[2].replicas[2].isPrimary @@ -35,8 +34,8 @@ def test_promotion_leads_to_correct_primary_selection(looper, node_3 = txnPoolNodeSet[2] # Demote node 3 - steward_3 = sdk_wallet_stewards[2] - demote_node(looper, steward_3, sdk_pool_handle, node_3) + steward_3 = vdr_wallet_stewards[2] + demote_node(looper, steward_3, vdr_pool_handle, node_3) disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, node_3) looper.removeProdable(node_3) txnPoolNodeSet.remove(node_3) @@ -48,7 +47,7 @@ def test_promotion_leads_to_correct_primary_selection(looper, node_1.replicas.primary_name_by_inst_id for node in txnPoolNodeSet) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_stewards[0], 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_stewards[0], 2) for node in txnPoolNodeSet: assert node.f == 1 assert node.replicas.num_replicas == 2 @@ -70,7 +69,7 @@ def test_promotion_leads_to_correct_primary_selection(looper, # Promoting node 3, increasing replica count node_3 = start_stopped_node(node_3, looper, tconf, tdir, allPluginsPath) - promote_node(looper, steward_3, sdk_pool_handle, node_3) + promote_node(looper, steward_3, vdr_pool_handle, node_3) txnPoolNodeSet.append(node_3) looper.run(checkNodesConnected(txnPoolNodeSet)) @@ -79,5 +78,5 @@ def test_promotion_leads_to_correct_primary_selection(looper, ensureElectionsDone(looper, txnPoolNodeSet, instances_list=[0, 1, 2]) # Node 3 able to do ordering - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_stewards[0], 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_stewards[0], 2) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/primary_selection/test_propagate_primary_after_primary_restart_view_0.py b/plenum/test/primary_selection/test_propagate_primary_after_primary_restart_view_0.py index bf12e8e071..6e0f168349 100644 --- a/plenum/test/primary_selection/test_propagate_primary_after_primary_restart_view_0.py +++ b/plenum/test/primary_selection/test_propagate_primary_after_primary_restart_view_0.py @@ -1,5 +1,5 @@ from plenum.test.delayers import icDelay -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.helper import checkViewNoForNodes from plenum.test.view_change.helper import start_stopped_node from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected @@ -29,7 +29,7 @@ def _get_ppseqno(nodes): def test_propagate_primary_after_primary_restart_view_0( - looper, txnPoolNodeSet, tconf, sdk_pool_handle, sdk_wallet_steward, tdir, allPluginsPath): + looper, txnPoolNodeSet, tconf, vdr_pool_handle, vdr_wallet_steward, tdir, allPluginsPath): """ Delay instance change msgs to prevent view change during primary restart to test propagate primary for primary node. 
@@ -37,7 +37,7 @@ def test_propagate_primary_after_primary_restart_view_0( indices correctly case viewNo == 0 """ - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) old_ppseqno = _get_ppseqno(txnPoolNodeSet) assert (old_ppseqno > 0) @@ -72,7 +72,7 @@ def test_propagate_primary_after_primary_restart_view_0( # check ppSeqNo the same _get_ppseqno(txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) new_ppseqno = _get_ppseqno(txnPoolNodeSet) assert (new_ppseqno > old_ppseqno) diff --git a/plenum/test/primary_selection/test_propagate_primary_after_primary_restart_view_1.py b/plenum/test/primary_selection/test_propagate_primary_after_primary_restart_view_1.py index 05ec0fe826..d12bab0ba5 100644 --- a/plenum/test/primary_selection/test_propagate_primary_after_primary_restart_view_1.py +++ b/plenum/test/primary_selection/test_propagate_primary_after_primary_restart_view_1.py @@ -1,5 +1,5 @@ from plenum.test.delayers import icDelay -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.helper import checkViewNoForNodes from plenum.test.view_change.helper import start_stopped_node, ensure_view_change from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected @@ -29,7 +29,7 @@ def _get_ppseqno(nodes): def test_propagate_primary_after_primary_restart_view_1( - looper, txnPoolNodeSet, tconf, sdk_pool_handle, sdk_wallet_steward, tdir, allPluginsPath): + looper, txnPoolNodeSet, tconf, vdr_pool_handle, vdr_wallet_steward, tdir, allPluginsPath): """ Delay instance change msgs to prevent view change during primary restart to test propagate primary for primary node. 
@@ -41,7 +41,7 @@ def test_propagate_primary_after_primary_restart_view_1( ensure_view_change(looper, txnPoolNodeSet) checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=1) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) old_ppseqno = _get_ppseqno(txnPoolNodeSet) assert (old_ppseqno > 0) @@ -76,7 +76,7 @@ def test_propagate_primary_after_primary_restart_view_1( # check ppSeqNo the same _get_ppseqno(txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) new_ppseqno = _get_ppseqno(txnPoolNodeSet) assert (new_ppseqno > old_ppseqno) diff --git a/plenum/test/primary_selection/test_reconnect_primary_and_not_primary.py b/plenum/test/primary_selection/test_reconnect_primary_and_not_primary.py index ce0eac726d..619d4b0a57 100644 --- a/plenum/test/primary_selection/test_reconnect_primary_and_not_primary.py +++ b/plenum/test/primary_selection/test_reconnect_primary_and_not_primary.py @@ -1,6 +1,6 @@ import pytest from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected -from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check +from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check from plenum.test.test_node import checkNodesConnected from stp_core.loop.eventually import eventually from functools import partial @@ -19,8 +19,8 @@ def check_count_connected_node(nodes, expected_count): def test_reconnect_primary_and_not_primary(looper, txnPoolNodeSet, - sdk_wallet_steward, - sdk_pool_handle, + vdr_wallet_steward, + vdr_pool_handle, tconf): """ Test steps: @@ -38,7 +38,7 @@ def test_reconnect_primary_and_not_primary(looper, 10. Send some requests and check, that pool works. 
""" restNodes = set(txnPoolNodeSet) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, 5) assert txnPoolNodeSet[0].master_replica.isPrimary node_after_all_primary = txnPoolNodeSet[3] # Disconnect node after all primaries (after all backup primaries) @@ -51,7 +51,7 @@ def test_reconnect_primary_and_not_primary(looper, looper.run(eventually(partial(check_count_connected_node, restNodes, 6), timeout=5, acceptableExceptions=[AssertionError])) - sdk_send_random_and_check(looper, restNodes, sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, restNodes, vdr_pool_handle, vdr_wallet_steward, 5) # Get primary node for backup replica primary_node = txnPoolNodeSet[0] assert primary_node.master_replica.isPrimary @@ -68,7 +68,7 @@ def test_reconnect_primary_and_not_primary(looper, acceptableExceptions=[AssertionError])) looper.run(eventually(partial(checkViewNoForNodes, restNodes, expectedViewNo=old_view_no + 1), timeout=tconf.NEW_VIEW_TIMEOUT)) - sdk_send_random_and_check(looper, restNodes, sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, restNodes, vdr_pool_handle, vdr_wallet_steward, 5) logger.debug("restNodes: {}".format(restNodes)) restNodes.add(node_after_all_primary) # Return back node after all primary @@ -79,12 +79,12 @@ def test_reconnect_primary_and_not_primary(looper, timeout=5, acceptableExceptions=[AssertionError])) assert len(set([len(n.replicas) for n in restNodes])) == 1 - sdk_send_random_and_check(looper, restNodes, sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, restNodes, vdr_pool_handle, vdr_wallet_steward, 5) # Return back primary node restNodes.add(primary_node) reconnect_node_and_ensure_connected(looper, restNodes, primary_node) looper.run(checkNodesConnected(restNodes, customTimeout=5*tconf.RETRY_TIMEOUT_RESTRICTED)) - sdk_send_random_and_check(looper, restNodes, sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, restNodes, vdr_pool_handle, vdr_wallet_steward, 5) diff --git a/plenum/test/primary_selection/test_recover_after_demoted.py b/plenum/test/primary_selection/test_recover_after_demoted.py index 168d9c5bc0..a98cc6aefc 100644 --- a/plenum/test/primary_selection/test_recover_after_demoted.py +++ b/plenum/test/primary_selection/test_recover_after_demoted.py @@ -1,4 +1,4 @@ -from plenum.test.helper import sdk_send_random_and_check, waitForViewChange +from plenum.test.helper import vdr_send_random_and_check, waitForViewChange from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.pool_transactions.helper import demote_node from plenum.test.view_change.helper import ensure_view_change_by_primary_restart @@ -24,11 +24,11 @@ def demote_primary_node(looper, def test_restart_primaries_then_demote( looper, txnPoolNodeSet, tconf, tdir, allPluginsPath, - sdk_pool_handle, - sdk_wallet_stewards): + vdr_pool_handle, + vdr_wallet_stewards): """ """ - sdk_wallet_steward = sdk_wallet_stewards[0] + sdk_wallet_steward = vdr_wallet_stewards[0] logger.info("1. 
Restart Node1") pool_of_nodes = ensure_view_change_by_primary_restart(looper, txnPoolNodeSet, @@ -39,7 +39,7 @@ def test_restart_primaries_then_demote( exclude_from_check=['check_last_ordered_3pc_backup']) # ensure pool is working properly - sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle, + vdr_send_random_and_check(looper, pool_of_nodes, vdr_pool_handle, sdk_wallet_steward, 1) logger.info("2. Restart Node2") @@ -52,7 +52,7 @@ def test_restart_primaries_then_demote( exclude_from_check=['check_last_ordered_3pc_backup']) # ensure pool is working properly - sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle, + vdr_send_random_and_check(looper, pool_of_nodes, vdr_pool_handle, sdk_wallet_steward, 1) logger.info("3. Demote Node3") @@ -60,13 +60,13 @@ def test_restart_primaries_then_demote( pool_of_nodes = demote_primary_node(looper, txnPoolNodeSet, pool_of_nodes, - sdk_pool_handle, - sdk_wallet_stewards) + vdr_pool_handle, + vdr_wallet_stewards) # make sure view changed waitForViewChange(looper, pool_of_nodes, expectedViewNo=3) # ensure pool is working properly - sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle, + vdr_send_random_and_check(looper, pool_of_nodes, vdr_pool_handle, sdk_wallet_steward, 10) ensure_all_nodes_have_same_data(looper, nodes=pool_of_nodes) diff --git a/plenum/test/primary_selection/test_recover_more_than_f_failure.py b/plenum/test/primary_selection/test_recover_more_than_f_failure.py index e69e776f4c..8366457066 100644 --- a/plenum/test/primary_selection/test_recover_more_than_f_failure.py +++ b/plenum/test/primary_selection/test_recover_more_than_f_failure.py @@ -2,7 +2,7 @@ from stp_core.common.log import getlogger from plenum.test.helper import waitForViewChange, \ - sdk_send_random_and_check + vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected @@ -14,8 +14,8 @@ def test_recover_stop_primaries(looper, checkpoint_size, txnPoolNodeSet, - allPluginsPath, tdir, tconf, sdk_pool_handle, - sdk_wallet_steward): + allPluginsPath, tdir, tconf, vdr_pool_handle, + vdr_wallet_steward): """ Test that we can recover after having more than f nodes disconnected: - stop current master primary (Alpha) @@ -39,8 +39,8 @@ def test_recover_stop_primaries(looper, checkpoint_size, txnPoolNodeSet, ensure_all_nodes_have_same_data(looper, nodes=active_nodes) logger.info("send at least one checkpoint") - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 2 * checkpoint_size - 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 2 * checkpoint_size - 1) # TODO: When stable checkpoint is not deleted it makes sense to check just our last checkpoint # and remove eventually. 
looper.run(eventually(check_for_nodes, active_nodes, check_stable_checkpoint, 2 * checkpoint_freq)) @@ -65,8 +65,8 @@ def test_recover_stop_primaries(looper, checkpoint_size, txnPoolNodeSet, exclude_from_check=['check_last_ordered_3pc_backup']) logger.info("Check if the pool is able to process requests") - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 10 * checkpoint_size) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 10 * checkpoint_size) ensure_all_nodes_have_same_data(looper, nodes=active_nodes, exclude_from_check=['check_last_ordered_3pc_backup']) looper.run(eventually(check_for_nodes, active_nodes, check_stable_checkpoint, 12 * checkpoint_freq)) diff --git a/plenum/test/primary_selection/test_recover_primary_no_view_change.py b/plenum/test/primary_selection/test_recover_primary_no_view_change.py index 72d5954dde..d5c22fc140 100644 --- a/plenum/test/primary_selection/test_recover_primary_no_view_change.py +++ b/plenum/test/primary_selection/test_recover_primary_no_view_change.py @@ -5,7 +5,7 @@ from plenum.test.conftest import getValueFromModule from plenum.test.helper import waitForViewChange, \ - sdk_send_random_and_check + vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change.helper import start_stopped_node @@ -26,8 +26,8 @@ def tconf(tconf): def test_recover_stop_primaries_no_view_change(looper, checkpoint_size, txnPoolNodeSet, - allPluginsPath, tdir, tconf, sdk_pool_handle, - sdk_wallet_steward): + allPluginsPath, tdir, tconf, vdr_pool_handle, + vdr_wallet_steward): """ Test that we can recover after having more than f nodes disconnected: - send txns @@ -43,8 +43,8 @@ def test_recover_stop_primaries_no_view_change(looper, checkpoint_size, txnPoolN logger.info("send at least one checkpoint") check_for_nodes(active_nodes, check_stable_checkpoint, 0) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 2 * checkpoint_size) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 2 * checkpoint_size) # TODO: When stable checkpoint is not deleted it makes sense to check just our last checkpoint # and remove eventually looper.run(eventually(check_for_nodes, active_nodes, check_stable_checkpoint, 2 * checkpoint_freq)) @@ -69,8 +69,8 @@ def test_recover_stop_primaries_no_view_change(looper, checkpoint_size, txnPoolN exclude_from_check=['check_last_ordered_3pc_backup']) logger.info("Check if the pool is able to process requests") - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 10 * checkpoint_size) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 10 * checkpoint_size) ensure_all_nodes_have_same_data(looper, nodes=active_nodes, exclude_from_check=['check_last_ordered_3pc_backup']) looper.run(eventually(check_for_nodes, active_nodes, check_stable_checkpoint, 12 * checkpoint_freq)) diff --git a/plenum/test/primary_selection/test_selection_f_plus_one_quorum.py b/plenum/test/primary_selection/test_selection_f_plus_one_quorum.py index d7985eb32f..72cc2412d3 100644 --- a/plenum/test/primary_selection/test_selection_f_plus_one_quorum.py +++ b/plenum/test/primary_selection/test_selection_f_plus_one_quorum.py @@ -1,6 +1,6 @@ import pytest -from plenum.test.helper import waitForViewChange, sdk_send_random_and_check +from 
plenum.test.helper import waitForViewChange, vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected, \ reconnect_node_and_ensure_connected @@ -10,7 +10,7 @@ def test_selection_f_plus_one_quorum(looper, txnPoolNodeSet, allPluginsPath, - tdir, tconf, sdk_pool_handle, sdk_wallet_client): + tdir, tconf, vdr_pool_handle, vdr_wallet_client): """ Check that quorum f + 1 is used for primary selection when initiated by CurrentState messages. @@ -58,4 +58,4 @@ def test_selection_f_plus_one_quorum(looper, txnPoolNodeSet, allPluginsPath, instances_list=range(2), customTimeout=30) waitForViewChange(looper, active_nodes, expectedViewNo=expected_view_no) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) diff --git a/plenum/test/propagate/test_propagate_recvd_after_request.py b/plenum/test/propagate/test_propagate_recvd_after_request.py index f79a6f6b5e..4c7706f547 100644 --- a/plenum/test/propagate/test_propagate_recvd_after_request.py +++ b/plenum/test/propagate/test_propagate_recvd_after_request.py @@ -2,7 +2,7 @@ import pytest from plenum.common.constants import PROPAGATE -from plenum.test.helper import sdk_json_to_request_object, sdk_send_random_requests +from plenum.test.helper import vdr_json_to_request_object, vdr_send_random_requests from plenum.test.spy_helpers import get_count from stp_core.loop.eventually import eventually from plenum.common.messages.node_messages import Propagate @@ -17,7 +17,7 @@ @pytest.fixture() -def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client,): +def setup(txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client,): def _clean(*args): pass @@ -29,14 +29,14 @@ def _clean(*args): # disable _clean method which remove req.key from requests map A.requests._clean = types.MethodType( _clean, A.requests) - request_couple_json = sdk_send_random_requests( - looper, sdk_pool_handle, sdk_wallet_client, reqCount) + request_couple_json = vdr_send_random_requests( + looper, vdr_pool_handle, vdr_wallet_client, reqCount) return request_couple_json def testPropagateRecvdAfterRequest(setup, looper, txnPoolNodeSet): A, B, C, D = txnPoolNodeSet # type: TestNode - sent1 = sdk_json_to_request_object(setup[0][0]) + sent1 = vdr_json_to_request_object(setup[0][0]) def x(): # A should have received a request from the client diff --git a/plenum/test/recorder/conftest.py b/plenum/test/recorder/conftest.py index 1964acee91..af05650279 100644 --- a/plenum/test/recorder/conftest.py +++ b/plenum/test/recorder/conftest.py @@ -4,8 +4,8 @@ from plenum.common.util import randomString -from plenum.test.pool_transactions.helper import sdk_add_new_nym -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.pool_transactions.helper import vdr_add_new_nym +from plenum.test.helper import vdr_send_random_and_check from plenum.test.recorder.helper import create_recorder_for_test, \ reload_modules_for_recorder @@ -21,14 +21,14 @@ def tconf(tconf): @pytest.fixture(scope="module") -def some_txns_done(tconf, txnPoolNodesLooper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward): +def some_txns_done(tconf, txnPoolNodesLooper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward): for i in range(math.ceil(TOTAL_TXNS / 2)): - sdk_add_new_nym(txnPoolNodesLooper, sdk_pool_handle, sdk_wallet_steward, + 
vdr_add_new_nym(txnPoolNodesLooper, vdr_pool_handle, vdr_wallet_steward, alias='testSteward' + randomString(100)) for i in range(math.floor(TOTAL_TXNS / 2)): - sdk_send_random_and_check(txnPoolNodesLooper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(txnPoolNodesLooper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 5) @pytest.fixture() diff --git a/plenum/test/recorder/test_replay_node_bouncing.py b/plenum/test/recorder/test_replay_node_bouncing.py index e1ae7e0613..ddc0378eca 100644 --- a/plenum/test/recorder/test_replay_node_bouncing.py +++ b/plenum/test/recorder/test_replay_node_bouncing.py @@ -8,7 +8,7 @@ from plenum.common.config_helper import PNodeConfigHelper from plenum.test.recorder.helper import reload_modules_for_replay, \ get_replayable_node_class, create_replayable_node_and_check -from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check +from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from stp_core.loop.eventually import eventually from stp_core.types import HA @@ -20,7 +20,7 @@ def test_replay_new_bouncing(txnPoolNodesLooper, txnPoolNodeSet, tconf, tdir, testNodeClass, tmpdir_factory, node_config_helper_class, allPluginsPath, - some_txns_done, sdk_pool_handle, sdk_wallet_client): + some_txns_done, vdr_pool_handle, vdr_wallet_client): alpha = txnPoolNodeSet[0] old_view_no = alpha.viewNo other_nodes = txnPoolNodeSet[1:] @@ -31,9 +31,9 @@ def test_replay_new_bouncing(txnPoolNodesLooper, txnPoolNodeSet, tconf, tdir, txnPoolNodesLooper.run(eventually(checkViewNoForNodes, other_nodes, old_view_no + 1, retryWait=1, timeout=30)) - sdk_send_random_and_check(txnPoolNodesLooper, other_nodes, - sdk_pool_handle, - sdk_wallet_client, 10) + vdr_send_random_and_check(txnPoolNodesLooper, other_nodes, + vdr_pool_handle, + vdr_wallet_client, 10) ensure_all_nodes_have_same_data(txnPoolNodesLooper, other_nodes) for node in other_nodes: @@ -56,9 +56,9 @@ def test_replay_new_bouncing(txnPoolNodesLooper, txnPoolNodeSet, tconf, tdir, restarting_at = time.perf_counter() print('Stopped for {}'.format(restarting_at - stopping_at)) - sdk_send_random_and_check(txnPoolNodesLooper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, 10) + vdr_send_random_and_check(txnPoolNodesLooper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, 10) ensure_all_nodes_have_same_data(txnPoolNodesLooper, txnPoolNodeSet) for node in txnPoolNodeSet: diff --git a/plenum/test/recorder/test_replay_on_new_node.py b/plenum/test/recorder/test_replay_on_new_node.py index 26a4e1d7f5..a51f74e9c2 100644 --- a/plenum/test/recorder/test_replay_on_new_node.py +++ b/plenum/test/recorder/test_replay_on_new_node.py @@ -11,9 +11,9 @@ def test_replay_on_new_node(txnPoolNodesLooper, txnPoolNodeSet, tconf, tdir, testNodeClass, tmpdir_factory, node_config_helper_class, allPluginsPath, - sdk_new_node_caught_up): # noqa: F811 + vdr_new_node_caught_up): # noqa: F811 - new_node = sdk_new_node_caught_up + new_node = vdr_new_node_caught_up for node in txnPoolNodeSet: txnPoolNodesLooper.removeProdable(node) diff --git a/plenum/test/recorder/test_replay_with_view_change.py b/plenum/test/recorder/test_replay_with_view_change.py index a04e5eef08..c889c82912 100644 --- a/plenum/test/recorder/test_replay_with_view_change.py +++ b/plenum/test/recorder/test_replay_with_view_change.py @@ -1,7 +1,7 @@ import pytest from plenum.common.config_util import getConfigOnce -from 
plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.recorder.helper import reload_modules_for_replay, \ get_replayable_node_class, create_replayable_node_and_check @@ -15,7 +15,7 @@ @pytest.mark.skip(reason="The test takes too much time! Needs to be re-factored") def test_view_change_after_some_txns(txnPoolNodesLooper, txnPoolNodeSet, some_txns_done, testNodeClass, viewNo, # noqa - sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client, node_config_helper_class, tconf, tdir, allPluginsPath, tmpdir_factory): """ @@ -25,8 +25,8 @@ def test_view_change_after_some_txns(txnPoolNodesLooper, txnPoolNodeSet, ensureElectionsDone(looper=txnPoolNodesLooper, nodes=txnPoolNodeSet) ensure_all_nodes_have_same_data(txnPoolNodesLooper, nodes=txnPoolNodeSet) - sdk_send_random_and_check(txnPoolNodesLooper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 10) + vdr_send_random_and_check(txnPoolNodesLooper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 10) ensure_all_nodes_have_same_data(txnPoolNodesLooper, txnPoolNodeSet) for node in txnPoolNodeSet: diff --git a/plenum/test/replica/conftest.py b/plenum/test/replica/conftest.py index b9f509fa41..38268f80d2 100644 --- a/plenum/test/replica/conftest.py +++ b/plenum/test/replica/conftest.py @@ -15,7 +15,7 @@ from plenum.server.quorums import Quorums from plenum.server.replica import Replica from plenum.test.conftest import getValueFromModule -from plenum.test.helper import MockTimestamp, sdk_random_request_objects, create_pre_prepare_params, \ +from plenum.test.helper import MockTimestamp, vdr_random_request_objects, create_pre_prepare_params, \ create_prepare_from_pre_prepare from plenum.test.testing_utils import FakeSomething from plenum.test.bls.conftest import fake_state_root_hash, fake_multi_sig, fake_multi_sig_value @@ -104,7 +104,7 @@ def mock_timestamp(): @pytest.fixture() def fake_requests(): - return sdk_random_request_objects(10, identifier="fake_did", + return vdr_random_request_objects(10, identifier="fake_did", protocol_version=CURRENT_PROTOCOL_VERSION) diff --git a/plenum/test/replica/stashing/test_stash_future_view.py b/plenum/test/replica/stashing/test_stash_future_view.py index c34b778e66..feddef86f1 100644 --- a/plenum/test/replica/stashing/test_stash_future_view.py +++ b/plenum/test/replica/stashing/test_stash_future_view.py @@ -1,7 +1,7 @@ from plenum.common.constants import COMMIT, PREPREPARE, PREPARE from plenum.server.replica_validator_enums import STASH_VIEW_3PC from plenum.test.delayers import msg_rep_delay, nv_delay -from plenum.test.helper import waitForViewChange, sdk_send_random_and_check +from plenum.test.helper import waitForViewChange, vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.stasher import delay_rules from plenum.test.test_node import ensureElectionsDone @@ -10,8 +10,8 @@ def test_process_three_phase_msg_and_stashed_future_view(txnPoolNodeSet, looper, tconf, - sdk_pool_handle, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_steward): """ 1. Delay ViewChangeDone messages for the slow_node. 2. Start view change on all nodes. 
@@ -35,10 +35,10 @@ def test_process_three_phase_msg_and_stashed_future_view(txnPoolNodeSet, looper, ensureElectionsDone(looper=looper, nodes=fast_nodes, instances_list=range(fast_nodes[0].requiredNumberOfInstances)) - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, 1) assert slow_node.view_change_in_progress # 1 - pre-prepare msg diff --git a/plenum/test/replica/stashing/test_stash_out_of_watermarks.py b/plenum/test/replica/stashing/test_stash_out_of_watermarks.py index 39a8ed9623..27039efd7f 100644 --- a/plenum/test/replica/stashing/test_stash_out_of_watermarks.py +++ b/plenum/test/replica/stashing/test_stash_out_of_watermarks.py @@ -1,7 +1,7 @@ from plenum.common.constants import COMMIT, PREPREPARE, PREPARE from plenum.server.replica_validator_enums import STASH_WATERMARKS from plenum.test.delayers import chk_delay, msg_rep_delay -from plenum.test.helper import sdk_send_random_and_check, sdk_send_batches_of_random_and_check, incoming_3pc_msgs_count +from plenum.test.helper import vdr_send_random_and_check, vdr_send_batches_of_random_and_check, incoming_3pc_msgs_count from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules from stp_core.loop.eventually import eventually @@ -13,8 +13,8 @@ def test_process_three_phase_msg_and_stashed_for_next_checkpoint(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, chkFreqPatched): """ 1. Delay checkpoints processing on the slow_node. That is checkpoint on this node @@ -42,20 +42,20 @@ def test_process_three_phase_msg_and_stashed_for_next_checkpoint(txnPoolNodeSet, with delay_rules([slow_node.nodeIbStasher, ], msg_rep_delay(types_to_delay=[PREPREPARE, PREPARE, COMMIT])): with delay_rules([slow_node.nodeIbStasher, ], chk_delay()): - sdk_send_batches_of_random_and_check(looper, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, num_reqs=1 * CHK_FREQ, num_batches=CHK_FREQ) ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) looper.run(eventually(_check_checkpoint_finalize, fast_nodes, CHK_FREQ)) - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, 1) stashed_messages = incoming_3pc_msgs_count(len(txnPoolNodeSet)) diff --git a/plenum/test/replica/stashing/test_unstash_after_catchup.py b/plenum/test/replica/stashing/test_unstash_after_catchup.py index 63497fa691..9fb53e668d 100644 --- a/plenum/test/replica/stashing/test_unstash_after_catchup.py +++ b/plenum/test/replica/stashing/test_unstash_after_catchup.py @@ -4,18 +4,18 @@ from plenum.common.startable import Mode from plenum.server.replica_validator_enums import STASH_CATCH_UP, STASH_VIEW_3PC from plenum.test.delayers import msg_rep_delay, cDelay, cr_delay -from plenum.test.helper import sdk_send_random_and_check, assertExp, sdk_send_random_request, \ - sdk_get_and_check_replies, get_pp_seq_no +from plenum.test.helper import vdr_send_random_and_check, assertExp, vdr_send_random_request, \ + vdr_get_and_check_replies, get_pp_seq_no from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from 
plenum.test.stasher import delay_rules from plenum.test.test_node import ensureElectionsDone from stp_core.loop.eventually import eventually def test_unstash_three_phase_msg_after_catchup(txnPoolNodeSet, looper, tconf, - sdk_pool_handle, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_steward): """ 1. Delay Commit on Node4 2. Order 1 req @@ -43,15 +43,15 @@ def test_unstash_three_phase_msg_after_catchup(txnPoolNodeSet, looper, tconf, # Delay Commit messages for slow_node. slow_node.nodeIbStasher.delay(cDelay(sys.maxsize)) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 1) batches_count += 1 # Delay Commit messages for fast_nodes. for n in fast_nodes: n.nodeIbStasher.delay(cDelay(sys.maxsize)) - request2 = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_steward) + request2 = vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_steward) batches_count += 1 def check_commits(commit_key): @@ -86,12 +86,12 @@ def check_commits(commit_key): old_stashed, (len(txnPoolNodeSet) - 1) * 2)) - sdk_get_and_check_replies(looper, [request2]) + vdr_get_and_check_replies(looper, [request2]) _check_nodes_stashed(fast_nodes, old_stashed, 0) assert get_pp_seq_no(txnPoolNodeSet) == batches_count ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) def _check_nodes_stashed(nodes, old_stashed, new_stashed): diff --git a/plenum/test/replica/test_backup_can_order_after_catchup.py b/plenum/test/replica/test_backup_can_order_after_catchup.py index f55ca8b0c4..822a69cee7 100644 --- a/plenum/test/replica/test_backup_can_order_after_catchup.py +++ b/plenum/test/replica/test_backup_can_order_after_catchup.py @@ -3,7 +3,7 @@ from plenum.server.replica_helper import generateName from plenum.server.replicas import MASTER_REPLICA_INDEX from plenum.test.delayers import cDelay, pDelay, ppDelay, old_view_pp_request_delay -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules_without_processing from plenum.test.test_node import check_not_in_view_change @@ -26,8 +26,8 @@ def tconf(tconf): def test_backup_can_order_after_catchup(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): # We expect that after VC Gamma will be primary on backup delayed_node = txnPoolNodeSet[-2] fast_nodes = [n for n in txnPoolNodeSet if n != delayed_node] @@ -35,7 +35,7 @@ def test_backup_can_order_after_catchup(txnPoolNodeSet, pDelay(instId=MASTER_REPLICA_INDEX), cDelay(instId=MASTER_REPLICA_INDEX), ppDelay(instId=MASTER_REPLICA_INDEX)): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, REQUEST_COUNT) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, REQUEST_COUNT) with delay_rules_without_processing([n.nodeIbStasher for n in txnPoolNodeSet], old_view_pp_request_delay()): ensure_view_change(looper, txnPoolNodeSet) @@ -54,7 +54,7 @@ def check_backup_primaries(): looper.run(eventually(check_backup_primaries)) # Check, that backup cannot order - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, 
sdk_wallet_client, REQUEST_COUNT) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, REQUEST_COUNT) for n in txnPoolNodeSet: assert n.replicas._replicas[BACKUP_INST_ID].last_ordered_3pc[1] == 0 @@ -64,6 +64,6 @@ def check_backup_primaries(): # Check, that backup can order after catchup b_pp_seq_no_before = delayed_node.replicas[BACKUP_INST_ID].last_ordered_3pc[1] - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, REQUEST_COUNT) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, REQUEST_COUNT) assert delayed_node.replicas[BACKUP_INST_ID].last_ordered_3pc[1] == \ b_pp_seq_no_before + REQUEST_COUNT diff --git a/plenum/test/replica/test_backups_dont_order_while_reordering.py b/plenum/test/replica/test_backups_dont_order_while_reordering.py index 4332f54424..7fda3477dc 100644 --- a/plenum/test/replica/test_backups_dont_order_while_reordering.py +++ b/plenum/test/replica/test_backups_dont_order_while_reordering.py @@ -3,7 +3,7 @@ from plenum.server.replica_helper import generateName from plenum.server.replicas import MASTER_REPLICA_INDEX from plenum.test.delayers import pDelay, cDelay, msg_req_delay, msg_rep_delay, old_view_pp_request_delay, ppDelay -from plenum.test.helper import sdk_send_random_and_check, sdk_send_random_requests +from plenum.test.helper import vdr_send_random_and_check, vdr_send_random_requests from plenum.test.stasher import delay_rules_without_processing, delay_rules from plenum.test.test_node import check_not_in_view_change from plenum.test.view_change.helper import ensure_view_change @@ -31,8 +31,8 @@ def check_req_queue(node, expected_req_count): def test_backups_dont_order_while_reordering(txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, looper): """ This test needs to show that for now we stop ordering on backups @@ -61,7 +61,7 @@ def check_pp_count(node, expected_count, inst_id=0): msg_req_delay(), msg_rep_delay(), ppDelay(instId=MASTER_REPLICA_INDEX)): - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, REQS_FOR_REORDERING) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, REQS_FOR_REORDERING) looper.run(eventually(check_pp_count, delayed_node, REQS_FOR_REORDERING, BACKUP_INST_ID)) assert delayed_node.master_replica.last_ordered_3pc[1] == master_pp_seq_no_before with delay_rules([n.nodeIbStasher for n in txnPoolNodeSet], old_view_pp_request_delay()): @@ -82,6 +82,6 @@ def check_backup_primaries(): looper.run(eventually(check_backup_primaries)) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, REQS_FOR_REORDERING) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, REQS_FOR_REORDERING) for node in txnPoolNodeSet: assert node.replicas._replicas[BACKUP_INST_ID].last_ordered_3pc[1] == 0 diff --git a/plenum/test/replica/test_catchup_after_replica_addition.py b/plenum/test/replica/test_catchup_after_replica_addition.py index e80885742a..b86c733efd 100644 --- a/plenum/test/replica/test_catchup_after_replica_addition.py +++ b/plenum/test/replica/test_catchup_after_replica_addition.py @@ -4,12 +4,12 @@ from plenum.common.throughput_measurements import RevivalSpikeResistantEMAThroughputMeasurement from plenum.common.util import getMaxFailures -from plenum.test.helper import sdk_send_random_and_check, assertExp, sdk_get_and_check_replies +from plenum.test.helper import vdr_send_random_and_check, assertExp, 
vdr_get_and_check_replies from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.pool_transactions.conftest import sdk_node_theta_added -from plenum.test.pool_transactions.helper import sdk_add_new_nym, prepare_new_node_data, prepare_node_request, \ - sdk_sign_and_send_prepared_request, create_and_start_new_node +from plenum.test.pool_transactions.helper import vdr_add_new_nym, prepare_new_node_data, vdr_prepare_node_request, \ + vdr_sign_and_send_prepared_request, create_and_start_new_node from plenum.test.test_node import checkNodesConnected, TestNode from stp_core.loop.eventually import eventually @@ -19,7 +19,7 @@ def _send_txn_for_creating_node(looper, sdk_pool_handle, sdk_wallet_steward, tdir, new_node_name, clientIp, clientPort, nodeIp, nodePort, bls_key, sigseed, key_proof): new_steward_name = "testClientSteward" - new_steward_wallet_handle = sdk_add_new_nym(looper, + new_steward_wallet_handle = vdr_add_new_nym(looper, sdk_pool_handle, sdk_wallet_steward, alias=new_steward_name, @@ -28,7 +28,7 @@ def _send_txn_for_creating_node(looper, sdk_pool_handle, sdk_wallet_steward, tdi # filling node request _, steward_did = new_steward_wallet_handle node_request = looper.loop.run_until_complete( - prepare_node_request(steward_did, + vdr_prepare_node_request(steward_did, new_node_name=new_node_name, clientIp=clientIp, clientPort=clientPort, @@ -40,18 +40,18 @@ def _send_txn_for_creating_node(looper, sdk_pool_handle, sdk_wallet_steward, tdi key_proof=key_proof)) # sending request using 'sdk_' functions - request_couple = sdk_sign_and_send_prepared_request(looper, new_steward_wallet_handle, + request_couple = vdr_sign_and_send_prepared_request(looper, new_steward_wallet_handle, sdk_pool_handle, node_request) # waiting for replies - sdk_get_and_check_replies(looper, [request_couple]) + vdr_get_and_check_replies(looper, [request_couple]) -def test_catchup_after_replica_addition(looper, sdk_pool_handle, txnPoolNodeSet, - sdk_wallet_steward, tdir, tconf, allPluginsPath): +def test_catchup_after_replica_addition(looper, vdr_pool_handle, txnPoolNodeSet, + vdr_wallet_steward, tdir, tconf, allPluginsPath): view_no = txnPoolNodeSet[-1].viewNo - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 1) waitNodeDataEquality(looper, *txnPoolNodeSet) # create node @@ -64,7 +64,7 @@ def test_catchup_after_replica_addition(looper, sdk_pool_handle, txnPoolNodeSet, tconf=tconf, auto_start=True, plugin_path=allPluginsPath, nodeClass=TestNode) - _send_txn_for_creating_node(looper, sdk_pool_handle, sdk_wallet_steward, tdir, new_node_name, clientIp, + _send_txn_for_creating_node(looper, vdr_pool_handle, vdr_wallet_steward, tdir, new_node_name, clientIp, clientPort, nodeIp, nodePort, bls_key, sigseed, key_proof) txnPoolNodeSet.append(new_node) @@ -72,6 +72,6 @@ def test_catchup_after_replica_addition(looper, sdk_pool_handle, txnPoolNodeSet, looper.run(eventually(lambda: assertExp(n.viewNo == view_no + 1 for n in txnPoolNodeSet))) waitNodeDataEquality(looper, *txnPoolNodeSet) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 1) waitNodeDataEquality(looper, *txnPoolNodeSet, exclude_from_check=['check_last_ordered_3pc']) diff --git a/plenum/test/replica/test_catchup_after_replica_removing.py 
b/plenum/test/replica/test_catchup_after_replica_removing.py index 1b7a30dd39..3a222cc0b9 100644 --- a/plenum/test/replica/test_catchup_after_replica_removing.py +++ b/plenum/test/replica/test_catchup_after_replica_removing.py @@ -4,13 +4,13 @@ from plenum.common.throughput_measurements import RevivalSpikeResistantEMAThroughputMeasurement from plenum.common.util import getMaxFailures -from plenum.test.helper import sdk_send_random_and_check, assertExp, sdk_get_and_check_replies, waitForViewChange, \ +from plenum.test.helper import vdr_send_random_and_check, assertExp, vdr_get_and_check_replies, waitForViewChange, \ view_change_timeout from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.pool_transactions.conftest import sdk_node_theta_added -from plenum.test.pool_transactions.helper import sdk_add_new_nym, prepare_new_node_data, prepare_node_request, \ - sdk_sign_and_send_prepared_request, create_and_start_new_node, demote_node +from plenum.test.pool_transactions.helper import vdr_add_new_nym, prepare_new_node_data, vdr_prepare_node_request, \ + vdr_sign_and_send_prepared_request, create_and_start_new_node, demote_node from plenum.test.test_node import checkNodesConnected, TestNode, ensureElectionsDone from stp_core.loop.eventually import eventually @@ -23,16 +23,16 @@ def tconf(tconf): yield tconf -def test_catchup_after_replica_removing(looper, sdk_pool_handle, txnPoolNodeSet, - sdk_wallet_stewards, tdir, tconf, allPluginsPath): +def test_catchup_after_replica_removing(looper, vdr_pool_handle, txnPoolNodeSet, + vdr_wallet_stewards, tdir, tconf, allPluginsPath): view_no = txnPoolNodeSet[-1].viewNo - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_stewards[0], 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_stewards[0], 1) waitNodeDataEquality(looper, *txnPoolNodeSet) index, node_for_demote = [(i, n) for i, n in enumerate(txnPoolNodeSet) if n.replicas[1].isPrimary][0] - sdk_wallet_steward = sdk_wallet_stewards[index] - demote_node(looper, sdk_wallet_steward, sdk_pool_handle, node_for_demote) + sdk_wallet_steward = vdr_wallet_stewards[index] + demote_node(looper, sdk_wallet_steward, vdr_pool_handle, node_for_demote) txnPoolNodeSet.pop(index) # we are expecting 2 view changes here since Beta is selected as a master Primary on view=1 @@ -42,6 +42,6 @@ def test_catchup_after_replica_removing(looper, sdk_pool_handle, txnPoolNodeSet, ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30) waitNodeDataEquality(looper, *txnPoolNodeSet) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_stewards[0], 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_stewards[0], 1) waitNodeDataEquality(looper, *txnPoolNodeSet) diff --git a/plenum/test/replica/test_consensus_dp_batches.py b/plenum/test/replica/test_consensus_dp_batches.py index d4b7907b9a..08f6bce80a 100644 --- a/plenum/test/replica/test_consensus_dp_batches.py +++ b/plenum/test/replica/test_consensus_dp_batches.py @@ -1,10 +1,10 @@ -from plenum.test.helper import sdk_send_random_request +from plenum.test.helper import vdr_send_random_request from stp_core.loop.eventually import eventually from plenum.test.delayers import ppDelay, pDelay from plenum.test.stasher import delay_rules -def test_check_cdp_pp_storages(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): +def test_check_cdp_pp_storages(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): def 
check_all_empty(replica): assert not bool(replica._consensus_data.preprepared) assert not bool(replica._consensus_data.prepared) @@ -23,7 +23,7 @@ def operation_for_replicas(operation, node_set=txnPoolNodeSet): with delay_rules(node_stashers, pDelay()): with delay_rules(node_stashers, ppDelay()): - sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) + vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_client) looper.run(eventually(operation_for_replicas, check_all_empty, txnPoolNodeSet[1:])) looper.run(eventually(operation_for_replicas, check_preprepared_not_empty, txnPoolNodeSet[0:1])) looper.run(eventually(operation_for_replicas, check_preprepared_not_empty, txnPoolNodeSet)) diff --git a/plenum/test/replica/test_get_last_timestamp_from_state.py b/plenum/test/replica/test_get_last_timestamp_from_state.py index fcd11cef2d..0acf3f18a5 100644 --- a/plenum/test/replica/test_get_last_timestamp_from_state.py +++ b/plenum/test/replica/test_get_last_timestamp_from_state.py @@ -1,7 +1,7 @@ from plenum.common.constants import DOMAIN_LEDGER_ID, NYM from plenum.common.txn_util import get_txn_time from plenum.common.util import get_utc_epoch -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected from plenum.test.test_node import get_master_primary_node, checkNodesConnected @@ -10,26 +10,26 @@ def test_get_last_ordered_timestamp_after_catchup(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tconf, tdir, allPluginsPath): node_to_disconnect = txnPoolNodeSet[-1] - reply_before = sdk_send_random_and_check(looper, + reply_before = vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, 1)[0][1] looper.runFor(2) disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, node_to_disconnect) looper.removeProdable(name=node_to_disconnect.name) - reply = sdk_send_random_and_check(looper, + reply = vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, 1)[0][1] node_to_disconnect = start_stopped_node(node_to_disconnect, looper, tconf, @@ -45,12 +45,12 @@ def test_get_last_ordered_timestamp_after_catchup(looper, def test_choose_ts_from_state(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward): - sdk_send_random_and_check(looper, + vdr_pool_handle, + vdr_wallet_steward): + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, 1) primary_node = get_master_primary_node(txnPoolNodeSet) excpected_ts = get_utc_epoch() + 30 @@ -58,9 +58,9 @@ def test_choose_ts_from_state(looper, req_handler.database_manager.ts_store.set(excpected_ts, req_handler.state.headHash) primary_node.master_replica._ordering_service.last_accepted_pre_prepare_time = None - reply = sdk_send_random_and_check(looper, + reply = vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, 1)[0][1] assert abs(excpected_ts - int(get_txn_time(reply['result']))) < 3 diff --git a/plenum/test/replica/test_max_3pc_batches_in_flight.py b/plenum/test/replica/test_max_3pc_batches_in_flight.py index 97ac8d07b5..ff32cf68f5 100644 --- 
a/plenum/test/replica/test_max_3pc_batches_in_flight.py +++ b/plenum/test/replica/test_max_3pc_batches_in_flight.py @@ -1,7 +1,7 @@ import pytest from plenum.test.delayers import delay_3pc -from plenum.test.helper import max_3pc_batch_limits, sdk_send_random_requests, sdk_get_and_check_replies +from plenum.test.helper import max_3pc_batch_limits, vdr_send_random_requests, vdr_get_and_check_replies from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import start_delaying, stop_delaying_and_process from stp_core.common.log import getlogger @@ -25,8 +25,8 @@ def tconf(tconf): def test_max_3pc_batches_in_flight(tdir, tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): # Check pool initial state initial_3pc = txnPoolNodeSet[0].master_replica.last_ordered_3pc for node in txnPoolNodeSet[1:]: @@ -49,7 +49,7 @@ def check_ordered_till(pp_seq_no: int): delayers.append((pp_seq_no, delayer)) # Send a number of requests - reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, BATCHES_TO_ORDER) + reqs = vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, BATCHES_TO_ORDER) # Continuously check number of batches in flight for pp_seq_no, delayer in delayers: @@ -62,7 +62,7 @@ def check_ordered_till(pp_seq_no: int): assert batches_in_flight <= MAX_BATCHES_IN_FLIGHT # Check all requests are ordered - sdk_get_and_check_replies(looper, reqs) + vdr_get_and_check_replies(looper, reqs) # Ensure that all nodes will eventually have same data ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/replica/test_monitor_reset_after_replica_addition.py b/plenum/test/replica/test_monitor_reset_after_replica_addition.py index 6098c3f561..2688116631 100644 --- a/plenum/test/replica/test_monitor_reset_after_replica_addition.py +++ b/plenum/test/replica/test_monitor_reset_after_replica_addition.py @@ -3,7 +3,7 @@ from plenum.common.throughput_measurements import RevivalSpikeResistantEMAThroughputMeasurement from plenum.common.util import getMaxFailures -from plenum.test.helper import sdk_send_random_and_check, assertExp +from plenum.test.helper import vdr_send_random_and_check, assertExp from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.pool_transactions.conftest import sdk_node_theta_added @@ -29,16 +29,16 @@ def tconf(tconf): tconf.throughput_measurement_params = old_throughput_measurement_params -def test_monitor_reset_after_replica_addition(looper, sdk_pool_handle, txnPoolNodeSet, - sdk_wallet_steward, tdir, tconf, allPluginsPath): +def test_monitor_reset_after_replica_addition(looper, vdr_pool_handle, txnPoolNodeSet, + vdr_wallet_steward, tdir, tconf, allPluginsPath): view_no = txnPoolNodeSet[-1].viewNo - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 30) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 30) waitNodeDataEquality(looper, *txnPoolNodeSet) last_ordered = txnPoolNodeSet[-1].master_last_ordered_3PC sdk_node_theta_added(looper, txnPoolNodeSet, tdir, tconf, - sdk_pool_handle, sdk_wallet_steward, allPluginsPath) + vdr_pool_handle, vdr_wallet_steward, allPluginsPath) looper.runFor(tconf.throughput_measurement_params['window_size'] * tconf.throughput_measurement_params['min_cnt']) node = txnPoolNodeSet[0] diff --git a/plenum/test/replica/test_replica_clear_collections_after_view_change.py 
b/plenum/test/replica/test_replica_clear_collections_after_view_change.py index 69fec85b7d..353a3e2744 100644 --- a/plenum/test/replica/test_replica_clear_collections_after_view_change.py +++ b/plenum/test/replica/test_replica_clear_collections_after_view_change.py @@ -7,7 +7,7 @@ from plenum.test.stasher import delay_rules from plenum.test.view_change_service.helper import trigger_view_change from stp_core.loop.eventually import eventually -from plenum.test.helper import sdk_send_random_and_check, sdk_send_batches_of_random_and_check, \ +from plenum.test.helper import vdr_send_random_and_check, vdr_send_batches_of_random_and_check, \ waitForViewChange, max_3pc_batch_limits from plenum.test.checkpoints.conftest import chkFreqPatched, reqs_for_checkpoint @@ -23,12 +23,12 @@ def tconf(tconf): @pytest.mark.skip(reason="With new view change we don't clear requests") def test_replica_clear_collections_after_view_change(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf, tdir, allPluginsPath, - sdk_wallet_steward, + vdr_wallet_steward, chkFreqPatched, reqs_for_checkpoint): """ @@ -42,8 +42,8 @@ def test_replica_clear_collections_after_view_change(looper, stashers = [n.nodeIbStasher for n in txnPoolNodeSet] with delay_rules(stashers, cDelay(delay=sys.maxsize, instId=1)): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 1) trigger_view_change(txnPoolNodeSet) @@ -51,10 +51,10 @@ def test_replica_clear_collections_after_view_change(looper, customTimeout=2 * tconf.NEW_VIEW_TIMEOUT) # + 1 because of lastPrePrepareSeqNo was not dropped after view_change - sdk_send_batches_of_random_and_check(looper, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, num_reqs=reqs_for_checkpoint + 1) def check_request_queues(): diff --git a/plenum/test/replica/test_replica_received_preprepare_with_unknown_request.py b/plenum/test/replica/test_replica_received_preprepare_with_unknown_request.py index bdcc2fa5bb..d4ac917e73 100644 --- a/plenum/test/replica/test_replica_received_preprepare_with_unknown_request.py +++ b/plenum/test/replica/test_replica_received_preprepare_with_unknown_request.py @@ -6,7 +6,7 @@ from plenum.test.node_request.test_propagate.helper import sum_of_request_propagates from plenum.test.stasher import delay_rules from stp_core.common.log import getlogger -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.checkpoints.conftest import chkFreqPatched from plenum.test.replica.helper import register_pp_ts @@ -16,11 +16,11 @@ def test_replica_received_preprepare_with_ordered_request(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, chkFreqPatched): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 1) replica = txnPoolNodeSet[1].master_replica params = replica._ordering_service.spylog.getLastParams(OrderingService.process_preprepare) @@ -43,19 +43,19 @@ def discard(offendingMsg, reason, logger, cliOutput=False): def test_replica_received_preprepare_with_unknown_request(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + 
vdr_wallet_steward, chkFreqPatched, tconf): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 1) slow_nodes = txnPoolNodeSet[2:] nodes_stashers = [n.nodeIbStasher for n in slow_nodes] slow_replica_1 = txnPoolNodeSet[2].master_replica slow_replica_2 = txnPoolNodeSet[3].master_replica with delay_rules(nodes_stashers, ppgDelay()): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 1) params1 = slow_replica_1._ordering_service.spylog.getLastParams(OrderingService.process_preprepare) pp1 = params1["pre_prepare"] diff --git a/plenum/test/replica/test_replica_reject_same_pre_prepare.py b/plenum/test/replica/test_replica_reject_same_pre_prepare.py index 393adf12f5..e3550fb5ca 100644 --- a/plenum/test/replica/test_replica_reject_same_pre_prepare.py +++ b/plenum/test/replica/test_replica_reject_same_pre_prepare.py @@ -9,8 +9,8 @@ from plenum.test import waits from plenum.test.helper import checkPrePrepareReqSent, \ checkPrePrepareReqRecvd, \ - checkPrepareReqSent, sdk_send_random_requests, \ - sdk_json_to_request_object, sdk_get_replies, init_discarded + checkPrepareReqSent, vdr_send_random_requests, \ + vdr_json_to_request_object, vdr_get_replies, init_discarded from plenum.test.test_node import getNonPrimaryReplicas, getPrimaryReplica whitelist = ['doing nothing for now', @@ -21,7 +21,7 @@ # noinspection PyIncorrectDocstring -def testReplicasRejectSamePrePrepareMsg(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): +def testReplicasRejectSamePrePrepareMsg(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): """ Replicas should not accept PRE-PREPARE for view "v" and prepare sequence number "n" if it has already accepted a request with view number "v" and @@ -40,11 +40,11 @@ def testReplicasRejectSamePrePrepareMsg(looper, txnPoolNodeSet, sdk_pool_handle, for node in txnPoolNodeSet: node.nodeIbStasher.delay(cDelay(delay=c_delay, instId=1)) - req1 = sdk_send_random_requests(looper, - sdk_pool_handle, - sdk_wallet_client, + req1 = vdr_send_random_requests(looper, + vdr_pool_handle, + vdr_wallet_client, 1)[0] - request1 = sdk_json_to_request_object(req1[0]) + request1 = vdr_json_to_request_object(req1[0]) for npr in nonPrimaryReplicas: looper.run(eventually(checkPrepareReqSent, npr, @@ -72,10 +72,10 @@ def testReplicasRejectSamePrePrepareMsg(looper, txnPoolNodeSet, sdk_pool_handle, "one...") primaryRepl._ordering_service._lastPrePrepareSeqNo -= 1 view_no = primaryRepl.viewNo - request2 = sdk_json_to_request_object( - sdk_send_random_requests(looper, - sdk_pool_handle, - sdk_wallet_client, + request2 = vdr_json_to_request_object( + vdr_send_random_requests(looper, + vdr_pool_handle, + vdr_wallet_client, 1)[0][0]) timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet)) looper.run(eventually(checkPrePrepareReqSent, primaryRepl, request2, @@ -128,5 +128,5 @@ def testReplicasRejectSamePrePrepareMsg(looper, txnPoolNodeSet, sdk_pool_handle, timeout=timeout)) timeout = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)) + c_delay - result1 = sdk_get_replies(looper, [req1])[0][1] + result1 = vdr_get_replies(looper, [req1])[0][1] logger.debug("request {} gives result {}".format(request1, result1)) diff --git a/plenum/test/replica/test_revert_from_malicious.py b/plenum/test/replica/test_revert_from_malicious.py index 
2f96bcb774..55e9c896b3 100644 --- a/plenum/test/replica/test_revert_from_malicious.py +++ b/plenum/test/replica/test_revert_from_malicious.py @@ -1,14 +1,14 @@ import pytest from plenum.common.exceptions import InvalidClientMessageException, RequestRejectedException -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.test_node import getPrimaryReplica def test_revert_pp_from_malicious(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): def raise_invalid_ex(): raise InvalidClientMessageException(1, 2, "3") malicious_primary = getPrimaryReplica(txnPoolNodeSet).node @@ -16,8 +16,8 @@ def raise_invalid_ex(): for n in not_malicious_nodes: n.master_replica._ordering_service._do_dynamic_validation = lambda *args, **kwargs: raise_invalid_ex() with pytest.raises(RequestRejectedException, match="client request invalid"): - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, 1) diff --git a/plenum/test/replica_removing/helper.py b/plenum/test/replica_removing/helper.py index a2c5dc0382..4e2ec526c8 100644 --- a/plenum/test/replica_removing/helper.py +++ b/plenum/test/replica_removing/helper.py @@ -3,7 +3,7 @@ from plenum.common.throughput_measurements import RevivalSpikeResistantEMAThroughputMeasurement from plenum.test.delayers import cDelay -from plenum.test.helper import sdk_send_batches_of_random_and_check, waitForViewChange, \ +from plenum.test.helper import vdr_send_batches_of_random_and_check, waitForViewChange, \ acc_monitor from plenum.test.stasher import delay_rules from plenum.test.test_node import ensureElectionsDone @@ -64,7 +64,7 @@ def do_test_replica_removing_with_backup_degraded(looper, instance_to_remove = 1 stashers = [node.nodeIbStasher for node in txnPoolNodeSet] with delay_rules(stashers, cDelay(delay=sys.maxsize, instId=instance_to_remove)): - sdk_send_batches_of_random_and_check(looper, + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, diff --git a/plenum/test/replica_removing/test_replica_removing.py b/plenum/test/replica_removing/test_replica_removing.py index 9edfe39bf2..95ec34bff0 100644 --- a/plenum/test/replica_removing/test_replica_removing.py +++ b/plenum/test/replica_removing/test_replica_removing.py @@ -8,15 +8,15 @@ from plenum.test.delayers import cDelay, msg_rep_delay from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.node_catchup.test_config_ledger import start_stopped_node -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected from plenum.test.replica_removing.helper import check_replica_removed from plenum.test.stasher import delay_rules from plenum.test.view_change.helper import ensure_view_change from stp_core.loop.eventually import eventually from stp_core.common.log import getlogger -from plenum.test.helper import sdk_send_random_requests, sdk_get_replies, sdk_send_random_and_check, waitForViewChange, \ - freshness, sdk_send_batches_of_random_and_check, get_pp_seq_no, assertExp +from plenum.test.helper import vdr_send_random_requests, vdr_get_replies, vdr_send_random_and_check, waitForViewChange, \ + freshness, vdr_send_batches_of_random_and_check, 
get_pp_seq_no, assertExp from plenum.test.test_node import ensureElectionsDone, checkNodesConnected, \ get_master_primary_node, get_last_master_non_primary_node @@ -54,8 +54,8 @@ def get_forwarded_to_all(node, is_ordered=False): def test_primary_after_replica_restored(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, chkFreqPatched, view_change): A, B, C, D = txnPoolNodeSet @@ -63,20 +63,20 @@ def test_primary_after_replica_restored(looper, assert C.replicas._replicas[1].isPrimary D.replicas.remove_replica(1) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 2 * CHK_FREQ) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 2 * CHK_FREQ) do_view_change(txnPoolNodeSet, looper) batches_before = D.replicas._replicas[1].last_ordered_3pc[1] assert D.replicas._replicas[1].isPrimary - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 2 * CHK_FREQ) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 2 * CHK_FREQ) batches_after = D.replicas._replicas[1].last_ordered_3pc[1] assert batches_after > batches_before def test_replica_removal(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, chkFreqPatched, view_change): @@ -90,23 +90,23 @@ def test_replica_removal(looper, def test_replica_removal_does_not_cause_master_degradation( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, chkFreqPatched, view_change): node = txnPoolNodeSet[0] - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 5) node.replicas.remove_replica(node.replicas.num_replicas - 1) assert not node.monitor.isMasterDegraded() - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 5) assert not node.monitor.isMasterDegraded() def test_removed_replica_restored_on_view_change( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, tconf, tdir, allPluginsPath, chkFreqPatched, view_change): """ 1. 
Remove replica on some node which is not master primary @@ -142,16 +142,16 @@ def test_removed_replica_restored_on_view_change( def test_ordered_request_freed_on_replica_removal(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, chkFreqPatched, view_change): node = txnPoolNodeSet[0] - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 2) old_stable_checkpoint = node.master_replica._consensus_data.stable_checkpoint with delay_rules(node.nodeIbStasher, cDelay(), msg_rep_delay(types_to_delay=[COMMIT])): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) f_d, f_r = get_forwarded_to_all(node) assert f_d @@ -162,29 +162,29 @@ def test_ordered_request_freed_on_replica_removal(looper, ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, exclude_from_check=['check_primaries']) # Send one more request to stabilize checkpoint - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, CHK_FREQ - 1) looper.run(eventually(check_for_nodes, txnPoolNodeSet, check_stable_checkpoint, old_stable_checkpoint + CHK_FREQ)) def test_unordered_request_freed_on_replica_removal(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, chkFreqPatched, view_change): node = txnPoolNodeSet[0] # Stabilize checkpoint # Send one more request to stabilize checkpoint - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, CHK_FREQ - get_pp_seq_no(txnPoolNodeSet) % CHK_FREQ) old_stable_checkpoint = node.master_replica._consensus_data.stable_checkpoint stashers = [n.nodeIbStasher for n in txnPoolNodeSet] with delay_rules(stashers, cDelay(delay=sys.maxsize), msg_rep_delay(types_to_delay=[COMMIT])): - req = sdk_send_random_requests(looper, - sdk_pool_handle, - sdk_wallet_client, + req = vdr_send_random_requests(looper, + vdr_pool_handle, + vdr_wallet_client, 1) looper.runFor(waits.expectedPropagateTime(len(txnPoolNodeSet)) + waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + @@ -198,11 +198,11 @@ def test_unordered_request_freed_on_replica_removal(looper, assert node.requests[f_d].forwardedTo == node.replicas.num_replicas check_for_nodes(txnPoolNodeSet, check_stable_checkpoint, old_stable_checkpoint) - sdk_get_replies(looper, req) + vdr_get_replies(looper, req) check_for_nodes(txnPoolNodeSet, check_stable_checkpoint, old_stable_checkpoint) # Send one more request to stabilize checkpoint - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, CHK_FREQ - 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, CHK_FREQ - 1) looper.run(eventually(check_for_nodes, txnPoolNodeSet, diff --git a/plenum/test/replica_removing/test_replica_removing_after_node_started.py b/plenum/test/replica_removing/test_replica_removing_after_node_started.py index cb94f44207..fd42bcdc5b 100644 --- a/plenum/test/replica_removing/test_replica_removing_after_node_started.py +++ b/plenum/test/replica_removing/test_replica_removing_after_node_started.py @@ -4,7 +4,7 @@ from plenum.test.node_catchup.helper 
import waitNodeDataEquality from plenum.test.node_catchup.test_config_ledger import start_stopped_node -from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected, sdk_add_new_steward_and_node +from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected, vdr_add_new_steward_and_node from plenum.test.replica_removing.helper import check_replica_removed from plenum.test.view_change_service.helper import trigger_view_change from stp_core.loop.eventually import eventually @@ -30,12 +30,12 @@ def tconf(tconf): def test_replica_removing_after_node_started(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf, tdir, allPluginsPath, - sdk_wallet_steward): + vdr_wallet_steward): """ 1. Remove backup primary node. 2. Check that replicas with the disconnected primary were removed. @@ -66,9 +66,9 @@ def check_replica_removed_on_all_nodes(inst_id=instance_to_remove): looper.run(eventually(check_replica_removed_on_all_nodes, timeout=tconf.TolerateBackupPrimaryDisconnection * 2)) - new_steward_wallet, new_node = sdk_add_new_steward_and_node(looper, - sdk_pool_handle, - sdk_wallet_steward, + new_steward_wallet, new_node = vdr_add_new_steward_and_node(looper, + vdr_pool_handle, + vdr_wallet_steward, "test_steward", "test_node", tdir, diff --git a/plenum/test/replica_removing/test_replica_removing_after_view_change.py b/plenum/test/replica_removing/test_replica_removing_after_view_change.py index bb01ad3d5c..cc78e3f136 100644 --- a/plenum/test/replica_removing/test_replica_removing_after_view_change.py +++ b/plenum/test/replica_removing/test_replica_removing_after_view_change.py @@ -21,8 +21,8 @@ def tconf(tconf): def test_replica_removing_after_view_change(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf, tdir, allPluginsPath): diff --git a/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_acc_local.py b/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_acc_local.py index c42e6317bd..bcbbe851d3 100644 --- a/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_acc_local.py +++ b/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_acc_local.py @@ -11,14 +11,14 @@ def tconf(tconf): def test_replica_removing_with_backup_degraded(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_steward, tconf, tdir, allPluginsPath): do_test_replica_removing_with_backup_degraded(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf) diff --git a/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_acc_quorum.py b/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_acc_quorum.py index 1dc2493a80..8e7ca6e792 100644 --- a/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_acc_quorum.py +++ b/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_acc_quorum.py @@ -11,14 +11,14 @@ def tconf(tconf): def test_replica_removing_with_backup_degraded(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_steward, tconf, tdir, allPluginsPath): do_test_replica_removing_with_backup_degraded(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + 
vdr_wallet_client, tconf) diff --git a/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_local.py b/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_local.py index b7190bf04b..ec33d60a6f 100644 --- a/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_local.py +++ b/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_local.py @@ -11,14 +11,14 @@ def tconf(tconf): def test_replica_removing_with_backup_degraded(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_steward, tconf, tdir, allPluginsPath): do_test_replica_removing_with_backup_degraded(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf) diff --git a/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_quorum.py b/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_quorum.py index e28c2f62b8..18aa31f0e5 100644 --- a/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_quorum.py +++ b/plenum/test/replica_removing/test_replica_removing_with_backup_degraded_quorum.py @@ -11,14 +11,14 @@ def tconf(tconf): def test_replica_removing_with_backup_degraded(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_steward, tconf, tdir, allPluginsPath): do_test_replica_removing_with_backup_degraded(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf) diff --git a/plenum/test/replica_removing/test_replica_removing_with_primary_disconnected.py b/plenum/test/replica_removing/test_replica_removing_with_primary_disconnected.py index 5ca8c7cff5..27252267e3 100644 --- a/plenum/test/replica_removing/test_replica_removing_with_primary_disconnected.py +++ b/plenum/test/replica_removing/test_replica_removing_with_primary_disconnected.py @@ -19,8 +19,8 @@ def tconf(tconf): def test_replica_removing_with_primary_disconnected(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf, tdir, allPluginsPath): diff --git a/plenum/test/req_drop/test_req_drop_on_commit_phase_non_primary.py b/plenum/test/req_drop/test_req_drop_on_commit_phase_non_primary.py index 36046866d2..cf5ad455e1 100644 --- a/plenum/test/req_drop/test_req_drop_on_commit_phase_non_primary.py +++ b/plenum/test/req_drop/test_req_drop_on_commit_phase_non_primary.py @@ -1,13 +1,13 @@ import pytest -from plenum.test.helper import sdk_send_random_requests +from plenum.test.helper import vdr_send_random_requests from stp_core.loop.eventually import eventually from plenum.common.messages.node_messages import Commit from plenum.test.delayers import delay from plenum.test.propagate.helper import recvdRequest, recvdPropagate, \ sentPropagate, recvdPrepareForInstId, recvdCommitForInstId from plenum.test.test_node import TestNode -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional howlong = 20 @@ -34,15 +34,15 @@ def tconf(tconf): @pytest.fixture() -def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): +def setup(txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): global initial_ledger_size A, B, C, D = txnPoolNodeSet # type: TestNode lagged_node = C frm = [A, B, D] delay(Commit, frm=frm, to=lagged_node, 
howlong=howlong) initial_ledger_size = lagged_node.domainLedger.size - request_couple_json = sdk_send_random_requests( - looper, sdk_pool_handle, sdk_wallet_client, 1) + request_couple_json = vdr_send_random_requests( + looper, vdr_pool_handle, vdr_wallet_client, 1) return request_couple_json @@ -53,7 +53,7 @@ def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): # before all Prepares received. def test_req_drop_on_commit_phase_on_non_primary_and_then_ordered( tconf, setup, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): global initial_ledger_size A, B, C, D = txnPoolNodeSet # type: TestNode lagged_node = C @@ -104,4 +104,4 @@ def check_ledger_size(): looper.run(eventually(check_ledger_size, retryWait=.5, timeout=timeout)) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/req_drop/test_req_drop_on_commit_phase_primary.py b/plenum/test/req_drop/test_req_drop_on_commit_phase_primary.py index 32a9f1aea2..396f3436b4 100644 --- a/plenum/test/req_drop/test_req_drop_on_commit_phase_primary.py +++ b/plenum/test/req_drop/test_req_drop_on_commit_phase_primary.py @@ -1,13 +1,13 @@ import pytest -from plenum.test.helper import sdk_send_random_requests +from plenum.test.helper import vdr_send_random_requests from stp_core.loop.eventually import eventually from plenum.common.messages.node_messages import Commit from plenum.test.delayers import delay from plenum.test.propagate.helper import recvdRequest, recvdPropagate, \ sentPropagate, recvdPrepareForInstId, recvdCommitForInstId from plenum.test.test_node import TestNode -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional howlong = 20 @@ -34,15 +34,15 @@ def tconf(tconf): @pytest.fixture() -def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): +def setup(txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): global initial_ledger_size A, B, C, D = txnPoolNodeSet # type: TestNode lagged_node = A frm = [B, C, D] delay(Commit, frm=frm, to=lagged_node, howlong=howlong) initial_ledger_size = lagged_node.domainLedger.size - request_couple_json = sdk_send_random_requests( - looper, sdk_pool_handle, sdk_wallet_client, 1) + request_couple_json = vdr_send_random_requests( + looper, vdr_pool_handle, vdr_wallet_client, 1) return request_couple_json @@ -53,7 +53,7 @@ def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): # before all Prepares received. 
def test_req_drop_on_commit_phase_on_master_primary_and_then_ordered( tconf, setup, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): global initial_ledger_size A, B, C, D = txnPoolNodeSet # type: TestNode lagged_node = A @@ -104,4 +104,4 @@ def check_ledger_size(): looper.run(eventually(check_ledger_size, retryWait=.5, timeout=timeout)) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/req_drop/test_req_drop_on_prepare_phase_non_primary.py b/plenum/test/req_drop/test_req_drop_on_prepare_phase_non_primary.py index 5a8e659919..ef71698413 100644 --- a/plenum/test/req_drop/test_req_drop_on_prepare_phase_non_primary.py +++ b/plenum/test/req_drop/test_req_drop_on_prepare_phase_non_primary.py @@ -1,13 +1,13 @@ import pytest -from plenum.test.helper import sdk_send_random_requests +from plenum.test.helper import vdr_send_random_requests from stp_core.loop.eventually import eventually from plenum.common.messages.node_messages import Prepare, Commit from plenum.test.delayers import delay from plenum.test.propagate.helper import recvdRequest, recvdPropagate, \ sentPropagate, recvdPrepareForInstId, recvdCommitForInstId from plenum.test.test_node import TestNode -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional howlong = 20 @@ -35,7 +35,7 @@ def tconf(tconf): @pytest.fixture() -def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): +def setup(txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): global initial_ledger_size A, B, C, D = txnPoolNodeSet # type: TestNode lagged_node = C @@ -43,14 +43,14 @@ def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): delay(Prepare, frm=frm, to=lagged_node, howlong=howlong) delay(Commit, frm=frm, to=lagged_node, howlong=howlong + 3) initial_ledger_size = txnPoolNodeSet[0].domainLedger.size - request_couple_json = sdk_send_random_requests( - looper, sdk_pool_handle, sdk_wallet_client, 1) + request_couple_json = vdr_send_random_requests( + looper, vdr_pool_handle, vdr_wallet_client, 1) return request_couple_json def test_req_drop_on_prepare_phase_on_non_primary_and_then_ordered( tconf, setup, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): global initial_ledger_size A, B, C, D = txnPoolNodeSet # type: TestNode lagged_node = C @@ -96,4 +96,4 @@ def check_ledger_size(): looper.run(eventually(check_ledger_size, retryWait=.5, timeout=timeout)) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/req_drop/test_req_drop_on_prepare_phase_primary.py b/plenum/test/req_drop/test_req_drop_on_prepare_phase_primary.py index a96f10806e..02d5608bdc 100644 --- a/plenum/test/req_drop/test_req_drop_on_prepare_phase_primary.py +++ b/plenum/test/req_drop/test_req_drop_on_prepare_phase_primary.py @@ -1,13 +1,13 @@ import pytest -from plenum.test.helper import sdk_send_random_requests +from plenum.test.helper import vdr_send_random_requests from stp_core.loop.eventually import eventually from plenum.common.messages.node_messages import Prepare, Commit from plenum.test.delayers import delay from plenum.test.propagate.helper import recvdRequest, 
recvdPropagate, \ sentPropagate, recvdPrepareForInstId, recvdCommitForInstId from plenum.test.test_node import TestNode -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional howlong = 20 @@ -34,7 +34,7 @@ def tconf(tconf): @pytest.fixture() -def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): +def setup(txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): global initial_ledger_size A, B, C, D = txnPoolNodeSet # type: TestNode lagged_node = A @@ -42,14 +42,14 @@ def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): delay(Prepare, frm=frm, to=lagged_node, howlong=howlong) delay(Commit, frm=frm, to=lagged_node, howlong=howlong + 3) initial_ledger_size = txnPoolNodeSet[0].domainLedger.size - request_couple_json = sdk_send_random_requests( - looper, sdk_pool_handle, sdk_wallet_client, 1) + request_couple_json = vdr_send_random_requests( + looper, vdr_pool_handle, vdr_wallet_client, 1) return request_couple_json def test_req_drop_on_prepare_phase_on_master_primary_and_then_ordered( tconf, setup, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): global initial_ledger_size A, B, C, D = txnPoolNodeSet # type: TestNode lagged_node = A @@ -95,4 +95,4 @@ def check_ledger_size(): looper.run(eventually(check_ledger_size, retryWait=.5, timeout=timeout)) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/req_drop/test_req_drop_on_preprepare_phase_non_primary.py b/plenum/test/req_drop/test_req_drop_on_preprepare_phase_non_primary.py index ad61fed64a..6728d392e7 100644 --- a/plenum/test/req_drop/test_req_drop_on_preprepare_phase_non_primary.py +++ b/plenum/test/req_drop/test_req_drop_on_preprepare_phase_non_primary.py @@ -1,14 +1,14 @@ import pytest from plenum.common.constants import PROPAGATE -from plenum.test.helper import sdk_send_random_requests +from plenum.test.helper import vdr_send_random_requests from stp_core.loop.eventually import eventually from plenum.common.messages.node_messages import PrePrepare, Prepare, Commit from plenum.test.delayers import delay, msg_rep_delay from plenum.test.propagate.helper import recvdRequest, recvdPropagate, \ sentPropagate, recvdPrePrepareForInstId, recvdPrepareForInstId, recvdCommitForInstId from plenum.test.test_node import TestNode -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.spy_helpers import getAllArgs howlong = 20 @@ -38,7 +38,7 @@ def tconf(tconf): @pytest.fixture() -def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): +def setup(txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): global initial_ledger_size A, B, C, D = txnPoolNodeSet # type: TestNode lagged_node = C @@ -50,14 +50,14 @@ def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): # is requested A.nodeIbStasher.delay(msg_rep_delay(10 * howlong, [PROPAGATE, ])) initial_ledger_size = lagged_node.domainLedger.size - request_couple_json = sdk_send_random_requests( - looper, sdk_pool_handle, sdk_wallet_client, 1) + request_couple_json = vdr_send_random_requests( + looper, vdr_pool_handle, vdr_wallet_client, 1) return request_couple_json def test_req_drop_on_preprepare_phase_on_non_primary_and_then_ordered( tconf, 
setup, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): global initial_ledger_size A, B, C, D = txnPoolNodeSet # type: TestNode lagged_node = C @@ -126,4 +126,4 @@ def check_ledger_size(): looper.run(eventually(check_ledger_size, retryWait=.5, timeout=timeout)) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/req_drop/test_req_drop_on_propagate_phase_non_primary.py b/plenum/test/req_drop/test_req_drop_on_propagate_phase_non_primary.py index 23326ad56d..eaae197284 100644 --- a/plenum/test/req_drop/test_req_drop_on_propagate_phase_non_primary.py +++ b/plenum/test/req_drop/test_req_drop_on_propagate_phase_non_primary.py @@ -1,14 +1,14 @@ import pytest from plenum.common.constants import PROPAGATE -from plenum.test.helper import sdk_json_to_request_object, sdk_send_random_requests +from plenum.test.helper import vdr_json_to_request_object, vdr_send_random_requests from stp_core.loop.eventually import eventually from plenum.common.messages.node_messages import Propagate, PrePrepare, Prepare, Commit from plenum.test.delayers import delay, msg_rep_delay from plenum.test.propagate.helper import recvdRequest, recvdPropagate, \ sentPropagate, recvdPrePrepareForInstId, recvdPrepareForInstId, recvdCommitForInstId from plenum.test.test_node import TestNode -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional howlong = 20 @@ -35,7 +35,7 @@ def tconf(tconf): @pytest.fixture() -def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): +def setup(txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): global initial_ledger_size A, B, C, D = txnPoolNodeSet # type: TestNode lagged_node = C @@ -48,17 +48,17 @@ def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): # is requested A.nodeIbStasher.delay(msg_rep_delay(10 * howlong, [PROPAGATE, ])) initial_ledger_size = lagged_node.domainLedger.size - request_couple_json = sdk_send_random_requests( - looper, sdk_pool_handle, sdk_wallet_client, 1) + request_couple_json = vdr_send_random_requests( + looper, vdr_pool_handle, vdr_wallet_client, 1) return request_couple_json def test_req_drop_on_propagate_phase_on_non_primary_and_then_ordered( tconf, setup, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): global initial_ledger_size A, B, C, D = txnPoolNodeSet # type: TestNode - sent1 = sdk_json_to_request_object(setup[0][0]) + sent1 = vdr_json_to_request_object(setup[0][0]) lagged_node = C def check_propagates_and_3pc_delayed(): @@ -111,4 +111,4 @@ def check_ledger_size(): looper.run(eventually(check_ledger_size, retryWait=.5, timeout=timeout)) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/req_drop/test_req_drop_on_propagate_phase_primary.py b/plenum/test/req_drop/test_req_drop_on_propagate_phase_primary.py index b97b76d94b..a31b0685ea 100644 --- a/plenum/test/req_drop/test_req_drop_on_propagate_phase_primary.py +++ b/plenum/test/req_drop/test_req_drop_on_propagate_phase_primary.py @@ -1,14 +1,14 @@ import pytest from plenum.common.constants import PROPAGATE -from plenum.test.helper import sdk_json_to_request_object, 
sdk_send_random_requests +from plenum.test.helper import vdr_json_to_request_object, vdr_send_random_requests from stp_core.loop.eventually import eventually from plenum.common.messages.node_messages import Propagate from plenum.test.delayers import delay, msg_rep_delay from plenum.test.propagate.helper import recvdRequest, recvdPropagate, \ sentPropagate from plenum.test.test_node import TestNode -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional howlong = 20 @@ -35,7 +35,7 @@ def tconf(tconf): @pytest.fixture() -def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): +def setup(txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): global initial_ledger_size A, B, C, D = txnPoolNodeSet # type: TestNode lagged_node = A @@ -45,17 +45,17 @@ def setup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): # is requested A.nodeIbStasher.delay(msg_rep_delay(10 * howlong, [PROPAGATE, ])) initial_ledger_size = lagged_node.domainLedger.size - request_couple_json = sdk_send_random_requests( - looper, sdk_pool_handle, sdk_wallet_client, 1) + request_couple_json = vdr_send_random_requests( + looper, vdr_pool_handle, vdr_wallet_client, 1) return request_couple_json def test_req_drop_on_propagate_phase_on_master_primary_and_then_ordered( tconf, setup, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): global initial_ledger_size A, B, C, D = txnPoolNodeSet # type: TestNode - sent1 = sdk_json_to_request_object(setup[0][0]) + sent1 = vdr_json_to_request_object(setup[0][0]) lagged_node = A def check_propagates_delayed(): @@ -102,4 +102,4 @@ def check_ledger_size(): looper.run(eventually(check_ledger_size, retryWait=.5, timeout=timeout)) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/req_drop/test_req_drop_when_backup_replica_does_not_order.py b/plenum/test/req_drop/test_req_drop_when_backup_replica_does_not_order.py index 1ec6fca16f..e7a5aaccd6 100644 --- a/plenum/test/req_drop/test_req_drop_when_backup_replica_does_not_order.py +++ b/plenum/test/req_drop/test_req_drop_when_backup_replica_does_not_order.py @@ -1,7 +1,7 @@ import pytest from plenum.server.consensus.utils import replica_name_to_node_name -from plenum.test.helper import sdk_send_random_requests +from plenum.test.helper import vdr_send_random_requests from stp_core.loop.eventually import eventually @@ -37,7 +37,7 @@ def tconf(tconf): def test_req_drop_when_backup_replica_does_not_order( tconf, looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): assert len(txnPoolNodeSet[0].replicas) == 3 # Stop the primary of backup replica @@ -48,7 +48,7 @@ def test_req_drop_when_backup_replica_does_not_order( initial_ledger_size = txnPoolNodeSet[0].domainLedger.size - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1) def check_request_queue(): for n in txnPoolNodeSet: diff --git a/plenum/test/requests/test_send_audit_txn.py b/plenum/test/requests/test_send_audit_txn.py index ea3909507d..339a5aedec 100644 --- a/plenum/test/requests/test_send_audit_txn.py +++ b/plenum/test/requests/test_send_audit_txn.py @@ -2,26 +2,26 @@ import time import pytest -from plenum.test.helper import 
sdk_get_and_check_replies -from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request +from plenum.test.helper import vdr_get_and_check_replies +from plenum.test.pool_transactions.helper import vdr_sign_and_send_prepared_request from plenum.common.exceptions import RequestNackedException from plenum.common.constants import TXN_TYPE, AUDIT, CURRENT_PROTOCOL_VERSION from plenum.common.types import OPERATION, f -def test_send_audit_txn(looper, sdk_wallet_client, sdk_pool_handle): +def test_send_audit_txn(looper, vdr_wallet_client, vdr_pool_handle): req = { OPERATION: { TXN_TYPE: AUDIT, 'data': 'data1' }, - f.IDENTIFIER.nm: sdk_wallet_client[1], + f.IDENTIFIER.nm: vdr_wallet_client[1], f.REQ_ID.nm: int(time.time()), f.PROTOCOL_VERSION.nm: CURRENT_PROTOCOL_VERSION } - rep = sdk_sign_and_send_prepared_request(looper, sdk_wallet_client, sdk_pool_handle, json.dumps(req)) + rep = vdr_sign_and_send_prepared_request(looper, vdr_wallet_client, vdr_pool_handle, json.dumps(req)) with pytest.raises(RequestNackedException) as e: - sdk_get_and_check_replies(looper, [rep]) + vdr_get_and_check_replies(looper, [rep]) e.match('External audit requests are not allowed') diff --git a/plenum/test/restart/test_restart_node_4_all.py b/plenum/test/restart/test_restart_node_4_all.py index cb39c9f843..0ff15559d9 100644 --- a/plenum/test/restart/test_restart_node_4_all.py +++ b/plenum/test/restart/test_restart_node_4_all.py @@ -1,16 +1,16 @@ from plenum.test import waits -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.restart.helper import get_group, restart_nodes nodeCount = 7 def test_restart_groups_4_of_7_np_no_tm(looper, txnPoolNodeSet, tconf, tdir, - sdk_pool_handle, sdk_wallet_client, allPluginsPath): + vdr_pool_handle, vdr_wallet_client, allPluginsPath): tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) restart_group = get_group(txnPoolNodeSet, 4, include_primary=False) restart_nodes(looper, txnPoolNodeSet, restart_group, tconf, tdir, allPluginsPath, after_restart_timeout=tm, start_one_by_one=False) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/restart/test_restart_node_with_view_changes.py b/plenum/test/restart/test_restart_node_with_view_changes.py index fcdd5d28e2..b46a629467 100644 --- a/plenum/test/restart/test_restart_node_with_view_changes.py +++ b/plenum/test/restart/test_restart_node_with_view_changes.py @@ -3,7 +3,7 @@ from plenum.common.messages.node_messages import ViewChangeStartMessage from plenum.test.delayers import msg_rep_delay -from plenum.test.helper import sdk_send_random_and_check, assertExp, waitForViewChange +from plenum.test.helper import vdr_send_random_and_check, assertExp, waitForViewChange from plenum.test import waits from plenum.test.node_catchup.helper import waitNodeDataEquality @@ -18,8 +18,8 @@ def test_restart_node_with_view_changes(tdir, tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, allPluginsPath): ''' 1. 
Stop the node Delta @@ -48,8 +48,8 @@ def test_restart_node_with_view_changes(tdir, tconf, looper.removeProdable(lagging_node) # Send more requests to active nodes - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, len(rest_nodes) * 3) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, len(rest_nodes) * 3) waitNodeDataEquality(looper, *rest_nodes) # Restart stopped node @@ -114,6 +114,6 @@ def patch_on_view_change_started(node, msg, frm): ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet, instances_list=range(txnPoolNodeSet[0].requiredNumberOfInstances)) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) waitNodeDataEquality(looper, *txnPoolNodeSet) diff --git a/plenum/test/restart/test_restart_nodes.py b/plenum/test/restart/test_restart_nodes.py index 6c66592f1c..4e03234f1c 100644 --- a/plenum/test/restart/test_restart_nodes.py +++ b/plenum/test/restart/test_restart_nodes.py @@ -1,16 +1,16 @@ from plenum.test import waits -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.restart.helper import get_group, restart_nodes nodeCount = 7 def test_restart_groups_4_of_7_np_tm(looper, txnPoolNodeSet, tconf, tdir, - sdk_pool_handle, sdk_wallet_client, allPluginsPath): + vdr_pool_handle, vdr_wallet_client, allPluginsPath): tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) restart_group = get_group(txnPoolNodeSet, 4, include_primary=False) restart_nodes(looper, txnPoolNodeSet, restart_group, tconf, tdir, allPluginsPath, after_restart_timeout=tm, start_one_by_one=True) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/restart/test_restart_nodes_4_all_wp.py b/plenum/test/restart/test_restart_nodes_4_all_wp.py index ec48f0f815..2ece282f51 100644 --- a/plenum/test/restart/test_restart_nodes_4_all_wp.py +++ b/plenum/test/restart/test_restart_nodes_4_all_wp.py @@ -1,16 +1,16 @@ from plenum.test import waits -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.restart.helper import get_group, restart_nodes nodeCount = 7 def test_restart_groups_4_of_7_wp_no_tm(looper, txnPoolNodeSet, tconf, tdir, - sdk_pool_handle, sdk_wallet_client, allPluginsPath): + vdr_pool_handle, vdr_wallet_client, allPluginsPath): tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) restart_group = get_group(txnPoolNodeSet, 4, include_primary=True) restart_nodes(looper, txnPoolNodeSet, restart_group, tconf, tdir, allPluginsPath, after_restart_timeout=tm, start_one_by_one=False) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/restart/test_restart_nodes_4_np.py b/plenum/test/restart/test_restart_nodes_4_np.py index be76275c6e..e4c4c201ef 100644 --- a/plenum/test/restart/test_restart_nodes_4_np.py +++ b/plenum/test/restart/test_restart_nodes_4_np.py @@ -1,16 +1,16 @@ from plenum.test import waits -from 
plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.restart.helper import get_group, restart_nodes nodeCount = 7 def test_restart_groups_4_of_7_wp_tm(looper, txnPoolNodeSet, tconf, tdir, - sdk_pool_handle, sdk_wallet_client, allPluginsPath): + vdr_pool_handle, vdr_wallet_client, allPluginsPath): tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) restart_group = get_group(txnPoolNodeSet, 4, include_primary=True) restart_nodes(looper, txnPoolNodeSet, restart_group, tconf, tdir, allPluginsPath, after_restart_timeout=tm, start_one_by_one=True) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/restart/test_restart_nodes_6_all_np.py b/plenum/test/restart/test_restart_nodes_6_all_np.py index d6f2465e9d..97161554c6 100644 --- a/plenum/test/restart/test_restart_nodes_6_all_np.py +++ b/plenum/test/restart/test_restart_nodes_6_all_np.py @@ -1,16 +1,16 @@ from plenum.test import waits -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.restart.helper import get_group, restart_nodes nodeCount = 7 def test_restart_groups_6_of_7_np_no_tm(looper, txnPoolNodeSet, tconf, tdir, - sdk_pool_handle, sdk_wallet_client, allPluginsPath): + vdr_pool_handle, vdr_wallet_client, allPluginsPath): tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) restart_group = get_group(txnPoolNodeSet, 6, include_primary=False) restart_nodes(looper, txnPoolNodeSet, restart_group, tconf, tdir, allPluginsPath, after_restart_timeout=tm, start_one_by_one=False) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/restart/test_restart_nodes_6_all_wp.py b/plenum/test/restart/test_restart_nodes_6_all_wp.py index e23272e302..9939df9f7d 100644 --- a/plenum/test/restart/test_restart_nodes_6_all_wp.py +++ b/plenum/test/restart/test_restart_nodes_6_all_wp.py @@ -1,16 +1,16 @@ from plenum.test import waits -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.restart.helper import get_group, restart_nodes nodeCount = 7 def test_restart_groups_6_of_7_wp_no_tm(looper, txnPoolNodeSet, tconf, tdir, - sdk_pool_handle, sdk_wallet_client, allPluginsPath): + vdr_pool_handle, vdr_wallet_client, allPluginsPath): tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) restart_group = get_group(txnPoolNodeSet, 6, include_primary=True) restart_nodes(looper, txnPoolNodeSet, restart_group, tconf, tdir, allPluginsPath, after_restart_timeout=tm, start_one_by_one=False) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/restart/test_restart_nodes_6_np.py b/plenum/test/restart/test_restart_nodes_6_np.py index afb6c26b0c..8797199788 100644 --- a/plenum/test/restart/test_restart_nodes_6_np.py +++ b/plenum/test/restart/test_restart_nodes_6_np.py @@ -1,16 +1,16 @@ from 
plenum.test import waits -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.restart.helper import get_group, restart_nodes nodeCount = 7 def test_restart_groups_6_of_7_np_tm(looper, txnPoolNodeSet, tconf, tdir, - sdk_pool_handle, sdk_wallet_client, allPluginsPath): + vdr_pool_handle, vdr_wallet_client, allPluginsPath): tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) restart_group = get_group(txnPoolNodeSet, 6, include_primary=False) restart_nodes(looper, txnPoolNodeSet, restart_group, tconf, tdir, allPluginsPath, after_restart_timeout=tm, start_one_by_one=True) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/restart/test_restart_nodes_6_wp.py b/plenum/test/restart/test_restart_nodes_6_wp.py index 12c50b74b5..644d4d6c2a 100644 --- a/plenum/test/restart/test_restart_nodes_6_wp.py +++ b/plenum/test/restart/test_restart_nodes_6_wp.py @@ -1,16 +1,16 @@ from plenum.test import waits -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.restart.helper import get_group, restart_nodes nodeCount = 7 def test_restart_groups_6_of_7_wp_tm(looper, txnPoolNodeSet, tconf, tdir, - sdk_pool_handle, sdk_wallet_client, allPluginsPath): + vdr_pool_handle, vdr_wallet_client, allPluginsPath): tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) restart_group = get_group(txnPoolNodeSet, 6, include_primary=True) restart_nodes(looper, txnPoolNodeSet, restart_group, tconf, tdir, allPluginsPath, after_restart_timeout=tm, start_one_by_one=True) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/restart/test_restart_nodes_7.py b/plenum/test/restart/test_restart_nodes_7.py index 074a3467a1..24ba5e3919 100644 --- a/plenum/test/restart/test_restart_nodes_7.py +++ b/plenum/test/restart/test_restart_nodes_7.py @@ -1,16 +1,16 @@ from plenum.test import waits -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.restart.helper import get_group, restart_nodes nodeCount = 7 def test_restart_groups_7_of_7_wp_tm(looper, txnPoolNodeSet, tconf, tdir, - sdk_pool_handle, sdk_wallet_client, allPluginsPath): + vdr_pool_handle, vdr_wallet_client, allPluginsPath): tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) restart_group = get_group(txnPoolNodeSet, 7) restart_nodes(looper, txnPoolNodeSet, restart_group, tconf, tdir, allPluginsPath, after_restart_timeout=tm, start_one_by_one=True) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/restart/test_restart_nodes_7_all.py b/plenum/test/restart/test_restart_nodes_7_all.py index b641f1dd4e..65a8cfd6b2 100644 --- a/plenum/test/restart/test_restart_nodes_7_all.py +++ b/plenum/test/restart/test_restart_nodes_7_all.py @@ -1,16 +1,16 @@ from plenum.test import waits -from 
plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.restart.helper import get_group, restart_nodes nodeCount = 7 def test_restart_groups_7_of_7_wp_no_tm(looper, txnPoolNodeSet, tconf, tdir, - sdk_pool_handle, sdk_wallet_client, allPluginsPath): + vdr_pool_handle, vdr_wallet_client, allPluginsPath): tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) restart_group = get_group(txnPoolNodeSet, 7) restart_nodes(looper, txnPoolNodeSet, restart_group, tconf, tdir, allPluginsPath, after_restart_timeout=tm, start_one_by_one=False) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/restart/test_restart_to_inconsistent_state.py b/plenum/test/restart/test_restart_to_inconsistent_state.py index ded407d0c2..90d9090d8e 100644 --- a/plenum/test/restart/test_restart_to_inconsistent_state.py +++ b/plenum/test/restart/test_restart_to_inconsistent_state.py @@ -1,8 +1,8 @@ import pytest from plenum.test import waits -from plenum.test.helper import sdk_send_random_and_check -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.helper import vdr_send_random_and_check +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.restart.helper import restart_nodes from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change.helper import ensure_view_change_complete @@ -19,9 +19,9 @@ def tconf(tconf): def test_restart_majority_to_same_view(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): # Add transaction to ledger - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) majority = txnPoolNodeSet[:3] minority = txnPoolNodeSet[3:] @@ -50,13 +50,13 @@ def test_restart_majority_to_same_view(looper, txnPoolNodeSet, tconf, tdir, allP after_restart_timeout=tm, start_one_by_one=False) # Check that all nodes are still functional - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) def test_restart_majority_to_lower_view(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): # Add transaction to ledger - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) # Move to higher view ensure_view_change_complete(looper, txnPoolNodeSet) @@ -88,13 +88,13 @@ def test_restart_majority_to_lower_view(looper, txnPoolNodeSet, tconf, tdir, all after_restart_timeout=tm, start_one_by_one=False) # Check that all nodes are still functional - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) def test_restart_half_to_lower_view(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): # Add transaction to ledger - 
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) # Move to higher view ensure_view_change_complete(looper, txnPoolNodeSet) @@ -114,4 +114,4 @@ def test_restart_half_to_lower_view(looper, txnPoolNodeSet, tconf, tdir, allPlug assert node.spylog.count(node.on_inconsistent_3pc_state) == 0 # Check that all nodes are still functional - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, num_reqs=2, num_batches=1) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle, num_reqs=2, num_batches=1) diff --git a/plenum/test/restart/test_restart_to_same_view_with_killed_primary.py b/plenum/test/restart/test_restart_to_same_view_with_killed_primary.py index 42c1c54794..f80a162d26 100644 --- a/plenum/test/restart/test_restart_to_same_view_with_killed_primary.py +++ b/plenum/test/restart/test_restart_to_same_view_with_killed_primary.py @@ -1,8 +1,8 @@ import pytest from plenum.test import waits -from plenum.test.helper import sdk_send_random_and_check, waitForViewChange, view_change_timeout -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.helper import vdr_send_random_and_check, waitForViewChange, view_change_timeout +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.restart.helper import restart_nodes from plenum.test.test_node import ensureElectionsDone, ensure_node_disconnected @@ -21,7 +21,7 @@ def tconf(tconf): def test_restart_to_same_view_with_killed_primary(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): restart_timeout = tconf.ToleratePrimaryDisconnection + \ waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) @@ -39,7 +39,7 @@ def test_restart_to_same_view_with_killed_primary(looper, txnPoolNodeSet, tconf, ensureElectionsDone(looper, alive_nodes, instances_list=range(3)) # Add transaction to ledger - sdk_send_random_and_check(looper, alive_nodes, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, alive_nodes, vdr_pool_handle, vdr_wallet_client, 1) # Restart majority group majority_before_restart = majority.copy() @@ -66,4 +66,4 @@ def test_restart_to_same_view_with_killed_primary(looper, txnPoolNodeSet, tconf, ensureElectionsDone(looper, alive_nodes, instances_list=range(3)) # Check that all nodes are still functional - sdk_ensure_pool_functional(looper, alive_nodes, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, alive_nodes, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/script/helper.py b/plenum/test/script/helper.py index b299f3d6b8..1b014a7764 100644 --- a/plenum/test/script/helper.py +++ b/plenum/test/script/helper.py @@ -1,11 +1,11 @@ import pytest from plenum.common.constants import VALIDATOR -from plenum.test.pool_transactions.helper import sdk_send_update_node, sdk_pool_refresh +from plenum.test.pool_transactions.helper import vdr_send_update_node, vdr_pool_refresh from stp_core.common.log import getlogger from plenum.common.util import hexToFriendly from plenum.test import waits -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.test_node import TestNode, checkNodesConnected, \ ensureElectionsDone from stp_core.network.port_dispenser import genHa @@ -40,7 +40,7 @@ def 
changeNodeHa(looper, txnPoolNodeSet, # change HA sdk_wallet_steward = sdk_wallet_stewards[node_index] node_dest = hexToFriendly(subjectedNode.nodestack.verhex) - sdk_send_update_node(looper, sdk_wallet_steward, + vdr_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle, node_dest, subjectedNode.name, nodeStackNewHA[0], nodeStackNewHA[1], @@ -70,8 +70,8 @@ def changeNodeHa(looper, txnPoolNodeSet, retryWait=1, customTimeout=electionTimeout) - sdk_pool_refresh(looper, sdk_pool_handle) - sdk_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_refresh(looper, sdk_pool_handle) + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 8) diff --git a/plenum/test/script/test_change_non_primary_node_ha.py b/plenum/test/script/test_change_non_primary_node_ha.py index 9bfb39bb9a..7c0c9d220a 100644 --- a/plenum/test/script/test_change_non_primary_node_ha.py +++ b/plenum/test/script/test_change_non_primary_node_ha.py @@ -15,13 +15,13 @@ @pytest.mark.skipif('sys.platform == "win32"', reason='SOV-330') def testChangeNodeHaForNonPrimary(looper, txnPoolNodeSet, poolTxnData, poolTxnStewardNames, tconf, tdir, - sdk_pool_handle, sdk_wallet_stewards, - sdk_wallet_client): + vdr_pool_handle, vdr_wallet_stewards, + vdr_wallet_client): changeNodeHa(looper, txnPoolNodeSet, tconf, shouldBePrimary=False, tdir=tdir, - sdk_pool_handle=sdk_pool_handle, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client) + sdk_pool_handle=vdr_pool_handle, + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client) diff --git a/plenum/test/script/test_change_primary_node_ha.py b/plenum/test/script/test_change_primary_node_ha.py index cdf200047e..5048a5517d 100644 --- a/plenum/test/script/test_change_primary_node_ha.py +++ b/plenum/test/script/test_change_primary_node_ha.py @@ -16,13 +16,13 @@ @pytest.mark.skipif('sys.platform == "win32"', reason='SOV-330') def testChangeNodeHaForPrimary(looper, txnPoolNodeSet, poolTxnData, poolTxnStewardNames, tconf, tdir, - sdk_pool_handle, sdk_wallet_stewards, - sdk_wallet_client): + vdr_pool_handle, vdr_wallet_stewards, + vdr_wallet_client): changeNodeHa(looper, txnPoolNodeSet, tconf, shouldBePrimary=True, tdir=tdir, - sdk_pool_handle=sdk_pool_handle, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client) + sdk_pool_handle=vdr_pool_handle, + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client) diff --git a/plenum/test/sdk/test_sdk_bindings.py b/plenum/test/sdk/test_sdk_bindings.py index 3e5a4230c5..501cd46d55 100644 --- a/plenum/test/sdk/test_sdk_bindings.py +++ b/plenum/test/sdk/test_sdk_bindings.py @@ -1,92 +1,92 @@ -from plenum.test.helper import sdk_send_random_request, \ - sdk_send_random_requests, sdk_get_and_check_replies, sdk_send_random_and_check -from plenum.test.pool_transactions.helper import sdk_pool_refresh +from plenum.test.helper import vdr_send_random_request, \ + vdr_send_random_requests, vdr_get_and_check_replies, vdr_send_random_and_check +from plenum.test.pool_transactions.helper import vdr_pool_refresh -def test_sdk_pool_handle(sdk_pool_handle): - ph = sdk_pool_handle +def test_sdk_pool_handle(vdr_pool_handle): + ph = vdr_pool_handle assert ph > 0 -def test_sdk_wallet_handle(sdk_wallet_handle): - wh = sdk_wallet_handle +def test_sdk_wallet_handle(vdr_wallet_handle): + wh = vdr_wallet_handle assert wh > 0 -def test_sdk_trustee_wallet(sdk_wallet_trustee): - wh, tr_did = sdk_wallet_trustee +def 
test_sdk_trustee_wallet(vdr_wallet_trustee): + wh, tr_did = vdr_wallet_trustee assert wh > 0 assert tr_did -def test_sdk_steward_wallet(sdk_wallet_steward): - wh, st_did = sdk_wallet_steward +def test_sdk_steward_wallet(vdr_wallet_steward): + wh, st_did = vdr_wallet_steward assert wh > 0 assert st_did -def test_sdk_client_wallet(sdk_wallet_client): - wh, cl_did = sdk_wallet_client +def test_sdk_client_wallet(vdr_wallet_client): + wh, cl_did = vdr_wallet_client assert wh > 0 assert cl_did -def test_sdk_new_client_wallet(sdk_wallet_new_client): - wh, cl_did = sdk_wallet_new_client +def test_sdk_new_client_wallet(vdr_wallet_new_client): + wh, cl_did = vdr_wallet_new_client assert wh > 0 assert cl_did -def test_sdk_new_steward_wallet(sdk_wallet_new_steward): - wh, cl_did = sdk_wallet_new_steward +def test_sdk_new_steward_wallet(vdr_wallet_new_steward): + wh, cl_did = vdr_wallet_new_steward assert wh > 0 assert cl_did -def test_sdk_trustee_send(looper, sdk_pool_handle, sdk_wallet_trustee): - resp_task = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_trustee) - _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0] +def test_sdk_trustee_send(looper, vdr_pool_handle, vdr_wallet_trustee): + resp_task = vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_trustee) + _, j_resp = vdr_get_and_check_replies(looper, [resp_task])[0] assert j_resp['result'] -def test_sdk_steward_send(looper, sdk_pool_handle, sdk_wallet_steward): - resp_task = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_steward) - _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0] +def test_sdk_steward_send(looper, vdr_pool_handle, vdr_wallet_steward): + resp_task = vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_steward) + _, j_resp = vdr_get_and_check_replies(looper, [resp_task])[0] assert j_resp['result'] -def test_sdk_client_send(looper, sdk_pool_handle, sdk_wallet_client): - resp_task = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) - _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0] +def test_sdk_client_send(looper, vdr_pool_handle, vdr_wallet_client): + resp_task = vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_client) + _, j_resp = vdr_get_and_check_replies(looper, [resp_task])[0] assert j_resp['result'] -def test_sdk_client2_send(looper, sdk_pool_handle, sdk_wallet_client2): - resp_task = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client2) - _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0] +def test_sdk_client2_send(looper, vdr_pool_handle, vdr_wallet_client2): + resp_task = vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_client2) + _, j_resp = vdr_get_and_check_replies(looper, [resp_task])[0] assert j_resp['result'] -def test_sdk_new_client_send(looper, sdk_pool_handle, sdk_wallet_new_client): - resp_task = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_new_client) - _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0] +def test_sdk_new_client_send(looper, vdr_pool_handle, vdr_wallet_new_client): + resp_task = vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_new_client) + _, j_resp = vdr_get_and_check_replies(looper, [resp_task])[0] assert j_resp['result'] -def test_sdk_new_steward_send(looper, sdk_pool_handle, sdk_wallet_new_steward): - resp_task = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_new_steward) - _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0] +def test_sdk_new_steward_send(looper, vdr_pool_handle, 
vdr_wallet_new_steward): + resp_task = vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_new_steward) + _, j_resp = vdr_get_and_check_replies(looper, [resp_task])[0] assert j_resp['result'] -def test_sdk_steward_send_many(looper, sdk_pool_handle, sdk_wallet_steward): - resp_task = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_steward, 30) - repl = sdk_get_and_check_replies(looper, resp_task) +def test_sdk_steward_send_many(looper, vdr_pool_handle, vdr_wallet_steward): + resp_task = vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_steward, 30) + repl = vdr_get_and_check_replies(looper, resp_task) for _, resp in repl: assert resp['result'] -def test_sdk_pool_refresh(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): - sdk_pool_refresh(looper, sdk_pool_handle) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) +def test_sdk_pool_refresh(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): + vdr_pool_refresh(looper, vdr_pool_handle) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) diff --git a/plenum/test/sdk/test_sdk_many_stewards.py b/plenum/test/sdk/test_sdk_many_stewards.py index d4de053ddc..73c5aa3b6b 100644 --- a/plenum/test/sdk/test_sdk_many_stewards.py +++ b/plenum/test/sdk/test_sdk_many_stewards.py @@ -1,5 +1,5 @@ import pytest -from plenum.test.helper import sdk_send_random_requests, sdk_get_and_check_replies +from plenum.test.helper import vdr_send_random_requests, vdr_get_and_check_replies @pytest.fixture(scope="function", autouse=True) @@ -7,9 +7,9 @@ def limitTestRunningTime(): return 300 -def test_sdk_many_stewards_send_many(looper, sdk_pool_handle, sdk_wallet_stewards): - for sdk_wallet_steward in sdk_wallet_stewards: - resp_task = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_steward, 30) - repl = sdk_get_and_check_replies(looper, resp_task, timeout=90) +def test_sdk_many_stewards_send_many(looper, vdr_pool_handle, vdr_wallet_stewards): + for sdk_wallet_steward in vdr_wallet_stewards: + resp_task = vdr_send_random_requests(looper, vdr_pool_handle, sdk_wallet_steward, 30) + repl = vdr_get_and_check_replies(looper, resp_task, timeout=90) for _, resp in repl: assert resp['result'] \ No newline at end of file diff --git a/plenum/test/signing/test_create_did_without_endorser.py b/plenum/test/signing/test_create_did_without_endorser.py index 5e4ac88ce6..8fe3444ba4 100644 --- a/plenum/test/signing/test_create_did_without_endorser.py +++ b/plenum/test/signing/test_create_did_without_endorser.py @@ -2,16 +2,16 @@ import types import pytest -from indy.did import create_and_store_my_did -from indy.ledger import build_nym_request +from plenum.test.wallet_helper import vdr_create_and_store_did +from indy_vdr.ledger import build_nym_request from plenum.common.constants import NYM, STEWARD, ROLE, VERKEY from plenum.common.exceptions import UnauthorizedClientRequest, RequestNackedException, CouldNotAuthenticate from plenum.common.txn_util import get_request_data from plenum.common.util import randomString from plenum.server.request_handlers.utils import get_nym_details -from plenum.test.helper import sdk_get_and_check_replies -from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request +from plenum.test.helper import vdr_get_and_check_replies +from plenum.test.pool_transactions.helper import vdr_sign_and_send_prepared_request NEW_ROLE = None @@ -40,48 +40,46 @@ def patched_dynamic_validation(self, request, 
req_pp_time): @pytest.fixture(scope='function') -def nym_txn_data(looper, sdk_wallet_client): +def nym_txn_data(looper, vdr_wallet_client): seed = randomString(32) - wh, _ = sdk_wallet_client + wh, _ = vdr_wallet_client sender_did, sender_verkey = \ - looper.loop.run_until_complete(create_and_store_my_did(wh, json.dumps({'seed': seed}))) + looper.loop.run_until_complete(vdr_create_and_store_did(wh, seed)) return wh, randomString(5), sender_did, sender_verkey -def test_create_did_without_endorser(looper, txnPoolNodeSet, nym_txn_data, sdk_pool_handle, patch_nym_validation): +def test_create_did_without_endorser(looper, txnPoolNodeSet, nym_txn_data, vdr_pool_handle, patch_nym_validation): wh, alias, sender_did, sender_verkey = nym_txn_data - nym_request = looper.loop.run_until_complete( - build_nym_request(sender_did, sender_did, sender_verkey, alias, NEW_ROLE)) + nym_request = build_nym_request(sender_did, sender_did, sender_verkey, alias, NEW_ROLE) - request_couple = sdk_sign_and_send_prepared_request(looper, (wh, sender_did), sdk_pool_handle, nym_request) - sdk_get_and_check_replies(looper, [request_couple]) + request_couple = vdr_sign_and_send_prepared_request(looper, (wh, sender_did), vdr_pool_handle, nym_request) + vdr_get_and_check_replies(looper, [request_couple]) details = get_nym_details(txnPoolNodeSet[0].states[1], sender_did, is_committed=True) assert details[ROLE] == NEW_ROLE assert details[VERKEY] == sender_verkey -def test_create_did_without_endorser_empty_verkey(looper, nym_txn_data, sdk_wallet_client, sdk_pool_handle, +def test_create_did_without_endorser_empty_verkey(looper, nym_txn_data, vdr_wallet_client, vdr_pool_handle, patch_nym_validation): wh, alias, sender_did, sender_verkey = nym_txn_data - nym_request = looper.loop.run_until_complete(build_nym_request(sender_did, sender_did, None, alias, NEW_ROLE)) + nym_request = build_nym_request(sender_did, sender_did, None, alias, NEW_ROLE) - request_couple = sdk_sign_and_send_prepared_request(looper, (wh, sender_did), sdk_pool_handle, nym_request) + request_couple = vdr_sign_and_send_prepared_request(looper, (wh, sender_did), vdr_pool_handle, nym_request) with pytest.raises(RequestNackedException, match=CouldNotAuthenticate.reason.format(sender_did)): - sdk_get_and_check_replies(looper, [request_couple]) + vdr_get_and_check_replies(looper, [request_couple]) -def test_create_did_without_endorser_different_dest(looper, nym_txn_data, sdk_wallet_client, sdk_pool_handle, +def test_create_did_without_endorser_different_dest(looper, nym_txn_data, vdr_wallet_client, vdr_pool_handle, patch_nym_validation): wh, alias, sender_did, sender_verkey = nym_txn_data - nym_request = looper.loop.run_until_complete( - build_nym_request(sender_did, sdk_wallet_client[1], sender_verkey, alias, NEW_ROLE)) + nym_request = build_nym_request(sender_did, vdr_wallet_client[1], sender_verkey, alias, NEW_ROLE) - request_couple = sdk_sign_and_send_prepared_request(looper, (wh, sender_did), sdk_pool_handle, nym_request) + request_couple = vdr_sign_and_send_prepared_request(looper, (wh, sender_did), vdr_pool_handle, nym_request) with pytest.raises(RequestNackedException, match=CouldNotAuthenticate.reason.format(sender_did)): - sdk_get_and_check_replies(looper, [request_couple]) + vdr_get_and_check_replies(looper, [request_couple]) diff --git a/plenum/test/signing/test_signing.py b/plenum/test/signing/test_signing.py index 2c3a234a58..e6b6cd1db9 100644 --- a/plenum/test/signing/test_signing.py +++ b/plenum/test/signing/test_signing.py @@ -4,9 +4,7 @@ from 
plenum.common.constants import CURRENT_PROTOCOL_VERSION
from plenum.test.delayers import ppDelay, req_delay
-from plenum.test.helper import sdk_json_to_request_object, \
- sdk_send_random_requests, sdk_random_request_objects, sdk_multisign_request_object, sdk_send_signed_requests, \
- sdk_get_and_check_replies
+from plenum.test.helper import vdr_send_random_requests, vdr_get_replies, sdk_random_request_objects, sdk_multisign_request_object, sdk_send_signed_requests, sdk_get_and_check_replies
from plenum.test.node_request.test_reply_from_ledger_for_request import deserialize_req
from stp_core.loop.eventually import eventually
from plenum.common.exceptions import InsufficientCorrectSignatures, RequestNackedException
@@ -32,8 +30,8 @@ def evilAlpha(txnPoolNodeSet):
def testOneNodeAltersAClientRequest(looper,
txnPoolNodeSet,
evilAlpha,
- sdk_pool_handle,
- sdk_wallet_client):
+ vdr_pool_handle,
+ vdr_wallet_client):
"""Malicious Alpha node sends incorrect propagate. This test check that nodes raise InsufficientCorrectSignatures in validate this propagate"""
@@ -45,14 +43,16 @@ def testOneNodeAltersAClientRequest(looper,
goodNodes.remove(alpha)
# delay incoming client messages for good nodes by 250 milliseconds
# this gives Alpha a chance to send a propagate message
+
+ # The previous delay was set to sys.maxsize (effectively forever), not 0.25 seconds
for n in goodNodes: # type: TestNode
- n.nodeIbStasher.delay(ppDelay(sys.maxsize))
- n.nodeIbStasher.delay(req_delay(sys.maxsize))
+ n.nodeIbStasher.delay(ppDelay(0.250))
+ n.nodeIbStasher.delay(req_delay(0.250))
pastNodes = []
- request_couple_json = sdk_send_random_requests(looper, sdk_pool_handle,
- sdk_wallet_client, 1)
- sent1 = sdk_json_to_request_object(request_couple_json[0][0])
+ sent1 = vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1)
+ sent1 = vdr_get_replies(looper, sent1)
+ #sent1 = sdk_json_to_request_object(request_couple_json[0][0])
checkPropagated(looper, txnPoolNodeSet, sent1, faultyNodes)
def check():
@@ -63,11 +63,11 @@ def check():
frm = params["nodeName"]
reason = params["reason"]
assert frm == 'Alpha'
- invalid_signatures = 'did={}, signature={}'.format(sent1.identifier, sent1.signature)
+ invalid_signatures = 'did={}, signature={}'.format(sent1[0][0]["identifier"], sent1[0][0]["signature"])
assert reason == InsufficientCorrectSignatures.reason.format(1, 0, 1, invalid_signatures)
# ensure Alpha's propagates were ignored by the other nodes
- key = sent1.digest
+ key = json.loads(sent1[0][1][node.name])["result"]["txn"]["metadata"]["digest"]
props = node.requests[key].propagates
assert 'Alpha' not in props
for good in goodNodes:
@@ -80,7 +80,7 @@ def check():
timeout = waits.expectedClientRequestPropagationTime(len(txnPoolNodeSet))
looper.run(eventually(check, retryWait=1, timeout=timeout))
-
+# indy-sdk request manipulation test (invalid in vdr)
def test_request_with_incorrect_multisig_signatures(looper, sdk_pool_handle, sdk_wallet_client, sdk_wallet_client2):
req = sdk_random_request_objects(1, identifier=sdk_wallet_client[1], protocol_version=CURRENT_PROTOCOL_VERSION)[0]
@@ -90,7 +90,7 @@ def test_request_with_incorrect_multisig_signatures(looper, sdk_pool_handle, sdk
multisig_req = sdk_multisign_request_object(looper, sdk_wallet_client2, json.dumps(req.as_dict))
- rep1 = sdk_send_signed_requests(sdk_pool_handle, [multisig_req])
+ rep1 = sdk_send_signed_requests(looper, sdk_pool_handle, [multisig_req])
invalid_signatures = 'did={}, signature={}'.format(req.identifier, req.signatures[req.identifier])
expected_error_message = 'Reason: client request invalid: {}'.\ diff --git a/plenum/test/test_dirty_read.py b/plenum/test/test_dirty_read.py index d5fafafc7a..08d32f0621 100644 --- a/plenum/test/test_dirty_read.py +++ b/plenum/test/test_dirty_read.py @@ -1,10 +1,10 @@ from plenum.common.txn_util import get_seq_no, get_payload_data -from plenum.test.helper import sdk_send_random_and_check, \ - sdk_get_and_check_replies +from plenum.test.helper import vdr_send_random_and_check, \ + vdr_get_and_check_replies from plenum.common.constants import DATA from plenum.common.messages.node_messages import Ordered -from plenum.test.pool_transactions.helper import sdk_build_get_txn_request, \ - sdk_sign_and_send_prepared_request +from plenum.test.pool_transactions.helper import vdr_build_get_txn_request, \ + vdr_sign_and_send_prepared_request from stp_core.common.log import getlogger logger = getlogger() @@ -23,7 +23,7 @@ async def serviceReplicas(limit): node.serviceReplicas = serviceReplicas -def test_dirty_read(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): +def test_dirty_read(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): """ Tests the case when read request comes before write request is not executed on some nodes @@ -34,17 +34,17 @@ def test_dirty_read(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): logger.debug("Making node {} slow".format(node)) make_node_slow(node) - received_replies = sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + received_replies = vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, 1) result = received_replies[0][1]["result"] seq_no = get_seq_no(result) - _, did = sdk_wallet_client - req = sdk_build_get_txn_request(looper, did, seq_no) - request = sdk_sign_and_send_prepared_request(looper, sdk_wallet_client, - sdk_pool_handle, req) - received_replies = sdk_get_and_check_replies(looper, [request]) + _, did = vdr_wallet_client + req = vdr_build_get_txn_request(looper, did, seq_no) + request = vdr_sign_and_send_prepared_request(looper, vdr_wallet_client, + vdr_pool_handle, req) + received_replies = vdr_get_and_check_replies(looper, [request]) results = [str(get_payload_data(reply['result'][DATA])) for _, reply in received_replies] assert len(set(results)) == 1 diff --git a/plenum/test/test_get_txn_after_bls_key_rotation.py b/plenum/test/test_get_txn_after_bls_key_rotation.py index 0d8424af86..ad5eecec3e 100644 --- a/plenum/test/test_get_txn_after_bls_key_rotation.py +++ b/plenum/test/test_get_txn_after_bls_key_rotation.py @@ -1,6 +1,6 @@ from plenum.test.bls.helper import check_update_bls_key -from indy import ledger -from indy.did import create_and_store_my_did +from indy_vdr import ledger +from plenum.test.wallet_helper import vdr_create_and_store_did, vdr_sign_and_submit_request from plenum.test.delayers import cDelay from plenum.test.stasher import delay_rules, delay_rules_without_processing @@ -9,33 +9,33 @@ def test_get_txn_after_bls_key_rotation(looper, txnPoolNodeSet, - sdk_wallet_stewards, - sdk_wallet_trustee, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_stewards, + vdr_wallet_trustee, + vdr_wallet_client, + vdr_pool_handle): check_update_bls_key(node_num=0, saved_multi_sigs_count=4, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle, + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + 
sdk_pool_handle=vdr_pool_handle, pool_refresh=False) check_update_bls_key(node_num=1, saved_multi_sigs_count=4, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle, + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle, pool_refresh=False) check_update_bls_key(node_num=2, saved_multi_sigs_count=4, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle, + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle, pool_refresh=False) check_update_bls_key(node_num=3, saved_multi_sigs_count=4, looper=looper, txnPoolNodeSet=txnPoolNodeSet, - sdk_wallet_stewards=sdk_wallet_stewards, - sdk_wallet_client=sdk_wallet_client, - sdk_pool_handle=sdk_pool_handle, + sdk_wallet_stewards=vdr_wallet_stewards, + sdk_wallet_client=vdr_wallet_client, + sdk_pool_handle=vdr_pool_handle, pool_refresh=False) # Stop receiving of commits in a circle, so all nodes will have different sets of multi signatures @@ -43,14 +43,14 @@ def test_get_txn_after_bls_key_rotation(looper, txnPoolNodeSet, with delay_rules_without_processing(txnPoolNodeSet[1].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[0].name)): with delay_rules_without_processing(txnPoolNodeSet[2].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[1].name)): with delay_rules_without_processing(txnPoolNodeSet[3].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[2].name)): - did_future = create_and_store_my_did(sdk_wallet_client[0], "{}") + did_future = vdr_create_and_store_did(vdr_wallet_client[0]) did, verkey = looper.loop.run_until_complete(did_future) - nym_request_future = ledger.build_nym_request(sdk_wallet_trustee[1], did, verkey, None, None) + nym_request_future = ledger.build_nym_request(vdr_wallet_trustee[1], did, verkey, None, None) nym_request = looper.loop.run_until_complete(nym_request_future) - nym_response_future = ledger.sign_and_submit_request(sdk_pool_handle, sdk_wallet_trustee[0], sdk_wallet_trustee[1], nym_request) + nym_response_future = vdr_sign_and_submit_request(vdr_pool_handle, vdr_wallet_trustee[0], vdr_wallet_trustee[1], nym_request) looper.loop.run_until_complete(nym_response_future) - get_txn_request_future = ledger.build_get_txn_request(sdk_wallet_client[1], "DOMAIN", 1) + get_txn_request_future = ledger.build_get_txn_request(vdr_wallet_client[1], "DOMAIN", 1) get_txn_request = looper.loop.run_until_complete(get_txn_request_future) - get_txn_response_future = ledger.submit_request(sdk_pool_handle, get_txn_request) + get_txn_response_future = vdr_pool_handle.submit_request(get_txn_request) looper.loop.run_until_complete(get_txn_response_future) diff --git a/plenum/test/test_get_txn_state_proof.py b/plenum/test/test_get_txn_state_proof.py index c7be247377..e707c037d9 100644 --- a/plenum/test/test_get_txn_state_proof.py +++ b/plenum/test/test_get_txn_state_proof.py @@ -6,16 +6,17 @@ from plenum.common.constants import TXN_METADATA, TXN_METADATA_SEQ_NO, OP_FIELD_NAME from plenum.test.delayers import req_delay from plenum.test.stasher import delay_rules -from indy.did import create_and_store_my_did -from indy.ledger import build_nym_request, build_get_txn_request, sign_and_submit_request, submit_request, build_attrib_request, build_acceptance_mechanisms_request + +from 
plenum.test.wallet_helper import vdr_create_and_store_did, vdr_sign_and_submit_request +from indy_vdr import ledger def nym_on_ledger(looper, sdk_pool_handle, sdk_wallet_client, sdk_wallet_steward, seed=None): - did_future = create_and_store_my_did(sdk_wallet_client[0], json.dumps({"seed": seed}) if seed else "{}") + did_future = vdr_create_and_store_did(sdk_wallet_client[0], seed) did, vk = looper.loop.run_until_complete(did_future) - nym_req_future = build_nym_request(sdk_wallet_steward[1], did, vk, None, None) + nym_req_future = ledger.build_nym_request(sdk_wallet_steward[1], did, vk, None, None) nym_req = looper.loop.run_until_complete(nym_req_future) - nym_resp_future = sign_and_submit_request(sdk_pool_handle, sdk_wallet_steward[0], sdk_wallet_steward[1], nym_req) + nym_resp_future = vdr_sign_and_submit_request(sdk_pool_handle, sdk_wallet_steward[0], sdk_wallet_steward[1], nym_req) nym_resp = looper.loop.run_until_complete(nym_resp_future) nym = json.loads(nym_resp) assert nym["result"] @@ -25,9 +26,9 @@ def nym_on_ledger(looper, sdk_pool_handle, sdk_wallet_client, sdk_wallet_steward def attrib_on_ledger(looper, sdk_pool_handle, sdk_wallet_steward, sdk_client_wallet): - attrib_req_future = build_attrib_request(sdk_wallet_steward[1], sdk_client_wallet[1], None, "{}", None) + attrib_req_future = ledger.build_attrib_request(sdk_wallet_steward[1], sdk_client_wallet[1], None, "{}", None) attrib_req = looper.loop.run_until_complete(attrib_req_future) - attrib_resp_future = sign_and_submit_request(sdk_pool_handle, sdk_wallet_steward[0], sdk_wallet_steward[1], attrib_req) + attrib_resp_future = vdr_sign_and_submit_request(sdk_pool_handle, sdk_wallet_steward[0], sdk_wallet_steward[1], attrib_req) attrib_resp = looper.loop.run_until_complete(attrib_resp_future) attrib = json.loads(attrib_resp) print(attrib) @@ -39,9 +40,9 @@ def attrib_on_ledger(looper, sdk_pool_handle, sdk_wallet_steward, sdk_client_wal def aml_on_ledger(looper, sdk_pool_handle, sdk_wallet_trustee): ver = random.randint(1, 10000) - aml_req_future = build_acceptance_mechanisms_request(sdk_wallet_trustee[1], "{\"test\":\"aml\"}", str(ver), None) + aml_req_future = ledger.build_acceptance_mechanisms_request(sdk_wallet_trustee[1], "{\"test\":\"aml\"}", str(ver), None) aml_req = looper.loop.run_until_complete(aml_req_future) - aml_resp_future = sign_and_submit_request(sdk_pool_handle, sdk_wallet_trustee[0], sdk_wallet_trustee[1], aml_req) + aml_resp_future = vdr_sign_and_submit_request(sdk_pool_handle, sdk_wallet_trustee[0], sdk_wallet_trustee[1], aml_req) aml_resp = looper.loop.run_until_complete(aml_resp_future) aml = json.loads(aml_resp) assert aml["result"] @@ -55,16 +56,16 @@ def aml_on_ledger(looper, sdk_pool_handle, sdk_wallet_trustee): (['NYM', 'NYM', 'NYM'], 1, "DOMAIN"), (['NYM', 'AML', 'NYM'], 1, "CONFIG") ]) -def transactions(request, looper, sdk_pool_handle, sdk_wallet_client, sdk_wallet_steward, sdk_wallet_trustee): +def transactions(request, looper, vdr_pool_handle, vdr_wallet_client, vdr_wallet_steward, vdr_wallet_trustee): txns, for_get, id = request.param res = [] for txn in txns: seq_no = -1 if txn == 'NYM': - seq_no = nym_on_ledger(looper, sdk_pool_handle, sdk_wallet_client, sdk_wallet_steward) + seq_no = nym_on_ledger(looper, vdr_pool_handle, vdr_wallet_client, vdr_wallet_steward) elif txn == 'AML': - seq_no = aml_on_ledger(looper, sdk_pool_handle, sdk_wallet_trustee) + seq_no = aml_on_ledger(looper, vdr_pool_handle, vdr_wallet_trustee) res.append(seq_no) return res[for_get], id @@ -81,15 +82,15 @@ def 
nodeSetAlwaysResponding(request, txnPoolNodeSet, transactions): def sdk_get_txn(looper, sdk_pool_handle, seq_no, ledger_id): - get_txn_request_future = build_get_txn_request(None, ledger_id, seq_no) + get_txn_request_future = ledger.build_get_txn_request(None, ledger_id, seq_no) get_txn_request = looper.loop.run_until_complete(get_txn_request_future) - get_txn_response_future = submit_request(sdk_pool_handle, get_txn_request) + get_txn_response_future = sdk_pool_handle.submit_request(get_txn_request) get_txn_response = looper.loop.run_until_complete(get_txn_response_future) return get_txn_response -def test_get_txn_audit_proof(nodeSetAlwaysResponding, looper, sdk_pool_handle, transactions): +def test_get_txn_audit_proof(nodeSetAlwaysResponding, looper, vdr_pool_handle, transactions): seq_no, ledger = transactions - response = sdk_get_txn(looper, sdk_pool_handle, seq_no, ledger) + response = sdk_get_txn(looper, vdr_pool_handle, seq_no, ledger) resp_json = json.loads(response) assert resp_json[OP_FIELD_NAME] == "REPLY" diff --git a/plenum/test/test_memory_consumpion.py b/plenum/test/test_memory_consumpion.py index 1cb79bfaa1..af6123e904 100644 --- a/plenum/test/test_memory_consumpion.py +++ b/plenum/test/test_memory_consumpion.py @@ -2,24 +2,24 @@ from stp_core.common.log import getlogger from plenum.common.perf_util import get_size -from plenum.test.helper import sdk_send_random_requests -from plenum.test.pool_transactions.helper import sdk_add_new_nym +from plenum.test.helper import vdr_send_random_requests +from plenum.test.pool_transactions.helper import vdr_add_new_nym logger = getlogger() @pytest.mark.skip('Unskip if needed') -def testRequestsSize(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, noRetryReq): +def testRequestsSize(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, noRetryReq): clients = [] for i in range(4): - clients.append(sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_steward)) + clients.append(vdr_add_new_nym(looper, vdr_pool_handle, vdr_wallet_steward)) numRequests = 250 for (_, nym) in clients: logger.debug("{} sending {} requests".format(nym, numRequests)) - sdk_send_random_requests(looper, sdk_pool_handle, - sdk_wallet_steward, numRequests) + vdr_send_random_requests(looper, vdr_pool_handle, + vdr_wallet_steward, numRequests) logger.debug("{} sent {} requests".format(nym, numRequests)) for node in txnPoolNodeSet: diff --git a/plenum/test/test_node_request.py b/plenum/test/test_node_request.py index c8e5dfdf86..dc5733e699 100644 --- a/plenum/test/test_node_request.py +++ b/plenum/test/test_node_request.py @@ -7,7 +7,7 @@ PrePrepare, Prepare, Commit from plenum.test import waits from plenum.test.greek import genNodeNames -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.test_node import TestNode, TestNodeSet, \ checkPoolReady, genNodeReg, prepareNodeSet @@ -16,11 +16,11 @@ def testReqExecWhenReturnedByMaster(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client): + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, 1) async def chk(): diff --git a/plenum/test/test_performance.py b/plenum/test/test_performance.py index 7c8badf4c0..58f95b4531 100644 --- a/plenum/test/test_performance.py +++ b/plenum/test/test_performance.py @@ -13,7 +13,7 @@ from stp_core.loop.eventually 
import eventually from plenum.common.types import HA from stp_core.common.log import getlogger, Logger -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality, \ check_ledger_state from plenum.test.pool_transactions.helper import \ @@ -64,15 +64,15 @@ def change_checkpoint_freq(tconf): @skipper def test_node_load(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, capsys): client_batches = 150 txns_per_batch = 25 for i in range(client_batches): s = perf_counter() - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, txns_per_batch) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, txns_per_batch) with capsys.disabled(): print('{} executed {} client txns in {:.2f} seconds'. format(i + 1, txns_per_batch, perf_counter() - s)) @@ -82,7 +82,7 @@ def test_node_load(looper, txnPoolNodeSet, def test_node_load_consistent_time(tconf, change_checkpoint_freq, disable_node_monitor_config, looper, txnPoolNodeSet, capsys, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): # One of the reason memory grows is because spylog grows client_batches = 300 @@ -109,8 +109,8 @@ def test_node_load_consistent_time(tconf, change_checkpoint_freq, for i in range(client_batches): s = perf_counter() - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, txns_per_batch) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, txns_per_batch) t = perf_counter() - s with capsys.disabled(): print('{} executed {} client txns in {:.2f} seconds'. @@ -150,15 +150,15 @@ def test_node_load_consistent_time(tconf, change_checkpoint_freq, @skipper -def test_node_load_after_add(sdk_new_node_caught_up, txnPoolNodeSet, - looper, sdk_pool_handle, - sdk_wallet_client, capsys): +def test_node_load_after_add(vdr_new_node_caught_up, txnPoolNodeSet, + looper, vdr_pool_handle, + vdr_wallet_client, capsys): """ A node that restarts after some transactions should eventually get the transactions which happened while it was down :return: """ - new_node = sdk_new_node_caught_up + new_node = vdr_new_node_caught_up logger.debug("Sending requests") # Here's where we apply some load @@ -166,22 +166,22 @@ def test_node_load_after_add(sdk_new_node_caught_up, txnPoolNodeSet, txns_per_batch = 25 for i in range(client_batches): s = perf_counter() - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, txns_per_batch) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, txns_per_batch) with capsys.disabled(): print('{} executed {} client txns in {:.2f} seconds'. 
format(i + 1, txns_per_batch, perf_counter() - s)) logger.debug("Starting the stopped node, {}".format(new_node)) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 5) waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4]) @skipper -def test_node_load_after_add_then_disconnect(sdk_new_node_caught_up, txnPoolNodeSet, - tconf, looper, sdk_pool_handle, - sdk_wallet_client, +def test_node_load_after_add_then_disconnect(vdr_new_node_caught_up, txnPoolNodeSet, + tconf, looper, vdr_pool_handle, + vdr_wallet_client, tdirWithPoolTxns, allPluginsPath, capsys): """ @@ -189,7 +189,7 @@ def test_node_load_after_add_then_disconnect(sdk_new_node_caught_up, txnPoolNode transactions which happened while it was down :return: """ - new_node = sdk_new_node_caught_up + new_node = vdr_new_node_caught_up with capsys.disabled(): print("Stopping node {} with pool ledger size {}". format(new_node, new_node.poolManager.txnSeqNo)) @@ -200,8 +200,8 @@ def test_node_load_after_add_then_disconnect(sdk_new_node_caught_up, txnPoolNode txns_per_batch = 10 for i in range(client_batches): s = perf_counter() - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, txns_per_batch) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, txns_per_batch) with capsys.disabled(): print('{} executed {} client txns in {:.2f} seconds'. format(i + 1, txns_per_batch, perf_counter() - s)) @@ -235,8 +235,8 @@ def test_node_load_after_add_then_disconnect(sdk_new_node_caught_up, txnPoolNode waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4], customTimeout=timeout) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 5) waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4]) @@ -253,8 +253,8 @@ def test_nodestack_contexts_are_discrete(txnPoolNodeSet): def test_node_load_after_disconnect(looper, txnPoolNodeSet, tconf, allPluginsPath, tdirWithPoolTxns, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, capsys): nodes = txnPoolNodeSet x = nodes[-1] @@ -270,8 +270,8 @@ def test_node_load_after_disconnect(looper, txnPoolNodeSet, tconf, txns_per_batch = 10 for i in range(client_batches): s = perf_counter() - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, txns_per_batch) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, txns_per_batch) with capsys.disabled(): print('{} executed {} client txns in {:.2f} seconds'. format(i + 1, txns_per_batch, perf_counter() - s)) @@ -288,8 +288,8 @@ def test_node_load_after_disconnect(looper, txnPoolNodeSet, tconf, def test_node_load_after_one_node_drops_all_msgs( looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, capsys): nodes = txnPoolNodeSet x = nodes[-1] @@ -307,8 +307,8 @@ def handleOneNodeMsg(self, wrappedMsg): txns_per_batch = 25 for i in range(client_batches): s = perf_counter() - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, txns_per_batch) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, txns_per_batch) with capsys.disabled(): print('{} executed {} client txns in {:.2f} seconds'. 
format(i + 1, txns_per_batch, perf_counter() - s)) diff --git a/plenum/test/test_req_authenticator.py b/plenum/test/test_req_authenticator.py index 5da032093e..ec86b7b1a9 100644 --- a/plenum/test/test_req_authenticator.py +++ b/plenum/test/test_req_authenticator.py @@ -2,15 +2,15 @@ import pytest -from indy.did import key_for_did +from plenum.test.wallet_helper import vdr_get_did_signing_key from plenum.common.constants import TXN_TYPE, DATA, GET_TXN, DOMAIN_LEDGER_ID, NYM from plenum.common.exceptions import NoAuthenticatorFound from plenum.common.types import f from plenum.common.util import randomString from plenum.server.client_authn import SimpleAuthNr, CoreAuthNr from plenum.server.req_authenticator import ReqAuthenticator -from plenum.test.helper import sdk_sign_and_submit_op, sdk_send_random_and_check -from plenum.test.pool_transactions.helper import new_client_request +from plenum.test.helper import vdr_sign_and_submit_op, vdr_send_random_and_check +from plenum.test.pool_transactions.helper import vdr_new_client_request from plenum.test.stasher import delay_rules from stp_core.loop.eventually import eventually @@ -45,8 +45,8 @@ def test_authenticator_registration(pre_reqs, registration): def test_authentication(looper, pre_reqs, registration, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_client, + vdr_pool_handle): _, core_authnr, req_authnr = pre_reqs # Remove simple_authnr @@ -59,8 +59,8 @@ def test_authentication(looper, pre_reqs, registration, DATA: 1 } # Just creating the request - req = sdk_sign_and_submit_op(looper, sdk_pool_handle, - sdk_wallet_client, op) + req = vdr_sign_and_submit_op(looper, vdr_pool_handle, + vdr_wallet_client, op) with pytest.raises(NoAuthenticatorFound): req_authnr.authenticate(req[0]) @@ -71,20 +71,20 @@ def test_authentication(looper, pre_reqs, registration, DATA: 1 } # Just creating the request - req = sdk_sign_and_submit_op(looper, sdk_pool_handle, - sdk_wallet_client, op) + req = vdr_sign_and_submit_op(looper, vdr_pool_handle, + vdr_wallet_client, op) assert set() == req_authnr.authenticate(req[0]) # identifier for write type - wh, did = sdk_wallet_client - req = new_client_request(None, randomString(), looper, sdk_wallet_client) + wh, did = vdr_wallet_client + req = vdr_new_client_request(None, randomString(), looper, vdr_wallet_client) core_authnr.addIdr(did, - looper.loop.run_until_complete(key_for_did(sdk_pool_handle, wh, did))) + looper.loop.run_until_complete(vdr_get_did_signing_key(wh, did))) assert req_authnr.authenticate(json.loads(req)) == {did, } def test_propagate_of_ordered_request_doesnt_stash_requests_in_authenticator( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): # Universal delayer def stopAll(msg): @@ -98,9 +98,9 @@ def check_verified_req_list_is_empty(): lastNode = txnPoolNodeSet[-1] with delay_rules(lastNode.nodeIbStasher, stopAll), \ delay_rules(lastNode.clientIbStasher, stopAll): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, 1) old_propagates = [n.spylog.count('processPropagate') for n in txnPoolNodeSet] def check_more_propagates_delivered(): diff --git a/plenum/test/test_request.py b/plenum/test/test_request.py index c401ba0d61..f329cf045a 100644 --- a/plenum/test/test_request.py +++ b/plenum/test/test_request.py @@ -3,7 +3,7 @@ from plenum.common.constants import CURRENT_PROTOCOL_VERSION from 
plenum.common.request import Request from plenum.common.types import OPERATION, f -from plenum.test.helper import sdk_sign_request_from_dict, sdk_multisign_request_from_dict +from plenum.test.helper import vdr_sign_request_from_dict, vdr_multisign_request_from_dict @pytest.fixture(params=['with_endorser', 'no_endorser']) @@ -16,7 +16,7 @@ def endorser(request): @pytest.fixture(params=['all', 'sig_only', 'sigs_only', 'no_protocol_vers', 'all_sdk', 'sig_only_sdk', 'sigs_only_sdk', 'no_protocol_vers_sdk', 'endorser']) -def req(request, looper, sdk_wallet_client, endorser): +def req(request, looper, vdr_wallet_client, endorser): op = {'type': '1', 'something': 'nothing'} taaa = { @@ -26,12 +26,12 @@ def req(request, looper, sdk_wallet_client, endorser): if request.param.endswith('_sdk'): request.param = request.param[:-4] if request.param == 'sigs_only': - req = sdk_multisign_request_from_dict(looper, sdk_wallet_client, + req = vdr_multisign_request_from_dict(looper, vdr_wallet_client, op, reqId=1513945121191691, taa_acceptance=taaa, endorser=endorser) else: - req = sdk_sign_request_from_dict(looper, sdk_wallet_client, + req = vdr_sign_request_from_dict(looper, vdr_wallet_client, op, reqId=1513945121191691, taa_acceptance=taaa, endorser=endorser) diff --git a/plenum/test/test_request_executed_once_and_without_failing_behind.py b/plenum/test/test_request_executed_once_and_without_failing_behind.py index a4b97032ce..6f4f09778f 100644 --- a/plenum/test/test_request_executed_once_and_without_failing_behind.py +++ b/plenum/test/test_request_executed_once_and_without_failing_behind.py @@ -1,7 +1,7 @@ from plenum.common.constants import DOMAIN_LEDGER_ID from plenum.common.messages.node_messages import Checkpoint from plenum.common.txn_util import get_req_id -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check def set_checkpoint_faking(replica): @@ -18,8 +18,8 @@ def send(msg, stat=None): def test_request_executed_once_and_without_failing_behind(tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): """ Checks that all requests executed only once and without failing behind in wrote transactions @@ -39,9 +39,9 @@ def test_request_executed_once_and_without_failing_behind(tconf, looper, for replica in node.replicas.values(): set_checkpoint_faking(replica) - replies = sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + replies = vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, + vdr_wallet_client, number_of_requests) expected = [get_req_id(reply["result"]) for _, reply in replies] diff --git a/plenum/test/test_state_regenerated_from_ledger.py b/plenum/test/test_state_regenerated_from_ledger.py index d6aeb0437d..6276ad3d1c 100644 --- a/plenum/test/test_state_regenerated_from_ledger.py +++ b/plenum/test/test_state_regenerated_from_ledger.py @@ -15,8 +15,8 @@ def test_state_regenerated_from_ledger( looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tdir, tconf, allPluginsPath): @@ -25,8 +25,8 @@ def test_state_regenerated_from_ledger( """ sent_batches = 10 send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, 5 * sent_batches, sent_batches) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/transactions/test_new_txn_format.py 
b/plenum/test/transactions/test_new_txn_format.py index 5f2e2a42a4..561625c52c 100644 --- a/plenum/test/transactions/test_new_txn_format.py +++ b/plenum/test/transactions/test_new_txn_format.py @@ -8,7 +8,7 @@ from plenum.common.txn_util import transform_to_new_format, reqToTxn, get_payload_digest, get_digest from plenum.common.types import f, OPERATION from plenum.common.util import SortedDict -from plenum.test.helper import sdk_signed_random_requests, sdk_random_request_objects, sdk_multisign_request_object +from plenum.test.helper import vdr_signed_random_requests, vdr_random_request_objects, vdr_multisign_request_object @pytest.fixture( @@ -110,9 +110,9 @@ def req_to_legacy_txn(req: Request): return txn -def test_old_txn_metadata_digest_fallback(looper, sdk_wallet_client): +def test_old_txn_metadata_digest_fallback(looper, vdr_wallet_client): # Create signed request and convert to legacy txn - req_str = sdk_signed_random_requests(looper, sdk_wallet_client, 1)[0] + req_str = vdr_signed_random_requests(looper, vdr_wallet_client, 1)[0] req = deserialize_req(req_str) txn = req_to_legacy_txn(req_str) @@ -121,11 +121,11 @@ def test_old_txn_metadata_digest_fallback(looper, sdk_wallet_client): assert get_digest(txn) == None -def test_old_txn_metadata_multisig_digest_fallback(looper, sdk_wallet_client, sdk_wallet_client2): +def test_old_txn_metadata_multisig_digest_fallback(looper, vdr_wallet_client, vdr_wallet_client2): # Create signed request and convert to legacy txn - req_str = json.dumps(sdk_random_request_objects(1, CURRENT_PROTOCOL_VERSION, sdk_wallet_client[1])[0].as_dict) - req_str = sdk_multisign_request_object(looper, sdk_wallet_client, req_str) - req_str = sdk_multisign_request_object(looper, sdk_wallet_client2, req_str) + req_str = json.dumps(vdr_random_request_objects(1, CURRENT_PROTOCOL_VERSION, vdr_wallet_client[1])[0].as_dict) + req_str = vdr_multisign_request_object(looper, vdr_wallet_client, req_str) + req_str = vdr_multisign_request_object(looper, vdr_wallet_client2, req_str) req = deserialize_req(req_str) txn = req_to_legacy_txn(req_str) diff --git a/plenum/test/transactions/test_req_to_txn.py b/plenum/test/transactions/test_req_to_txn.py index 0487c6c2b2..1c6b6b1899 100644 --- a/plenum/test/transactions/test_req_to_txn.py +++ b/plenum/test/transactions/test_req_to_txn.py @@ -5,7 +5,7 @@ from plenum.common.txn_util import reqToTxn, append_txn_metadata from plenum.common.types import OPERATION, f from plenum.common.util import SortedDict -from plenum.test.helper import sdk_sign_request_from_dict, sdk_multisign_request_from_dict +from plenum.test.helper import vdr_sign_request_from_dict, vdr_multisign_request_from_dict @pytest.fixture(params=['with_endorser', 'no_endorser']) @@ -17,7 +17,7 @@ def endorser(request): @pytest.fixture(params=['all', 'sig_only', 'sigs_only', 'no_protocol_vers', 'custom_version', 'all_sdk', 'sig_only_sdk', 'sigs_only_sdk', 'no_protocol_vers_sdk']) -def req_and_expected(request, looper, sdk_wallet_client, endorser): +def req_and_expected(request, looper, vdr_wallet_client, endorser): op = {'type': '1', 'something': 'nothing'} @@ -33,12 +33,12 @@ def req_and_expected(request, looper, sdk_wallet_client, endorser): if request.param.endswith('_sdk'): request.param = request.param[:-4] if request.param == 'sigs_only': - req = sdk_multisign_request_from_dict(looper, sdk_wallet_client, + req = vdr_multisign_request_from_dict(looper, vdr_wallet_client, op, reqId=1513945121191691, taa_acceptance=taaa, endorser=endorser) else: - req = 
sdk_sign_request_from_dict(looper, sdk_wallet_client, + req = vdr_sign_request_from_dict(looper, vdr_wallet_client, op, reqId=1513945121191691, taa_acceptance=taaa, endorser=endorser) diff --git a/plenum/test/txn_author_agreement/acceptance/conftest.py b/plenum/test/txn_author_agreement/acceptance/conftest.py index 3f37120971..1284839830 100644 --- a/plenum/test/txn_author_agreement/acceptance/conftest.py +++ b/plenum/test/txn_author_agreement/acceptance/conftest.py @@ -11,7 +11,7 @@ InvalidClientTaaAcceptanceError, RequestRejectedException ) -from plenum.test.helper import sdk_send_and_check +from plenum.test.helper import vdr_send_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from .helper import ( build_nym_request, build_node_request, @@ -33,7 +33,7 @@ class ValidationType(Enum): @pytest.fixture(scope='module') def activate_taa( set_txn_author_agreement_aml, set_txn_author_agreement, - sdk_wallet_trustee, sdk_wallet_new_steward, sdk_wallet_client + vdr_wallet_trustee, vdr_wallet_new_steward, vdr_wallet_client ): return set_txn_author_agreement() @@ -61,10 +61,10 @@ def wrapped(signed_req_dict): @pytest.fixture(scope='module') -def validate_taa_acceptance_txn_api(looper, txnPoolNodeSet, sdk_pool_handle): +def validate_taa_acceptance_txn_api(looper, txnPoolNodeSet, vdr_pool_handle): def wrapped(signed_req_dict): signed_req_json = json.dumps(signed_req_dict) - sdk_send_and_check([signed_req_json], looper, txnPoolNodeSet, sdk_pool_handle)[0] + vdr_send_and_check([signed_req_json], looper, txnPoolNodeSet, vdr_pool_handle)[0] ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) return wrapped @@ -133,13 +133,13 @@ def validation_error(validation_type): @pytest.fixture def validate_taa_acceptance( looper, - sdk_wallet_new_steward, + vdr_wallet_new_steward, validate_taa_acceptance_func_api, validate_taa_acceptance_txn_api, validation_type, ): def wrapped(req_dict): - req_dict = sign_request_dict(looper, sdk_wallet_new_steward, req_dict) + req_dict = sign_request_dict(looper, vdr_wallet_new_steward, req_dict) { ValidationType.FuncApi: validate_taa_acceptance_func_api, ValidationType.TxnApi: validate_taa_acceptance_txn_api @@ -196,13 +196,13 @@ def taa_acceptance( @pytest.fixture -def domain_request_json(looper, sdk_wallet_new_steward): - return build_nym_request(looper, sdk_wallet_new_steward) +def domain_request_json(looper, vdr_wallet_new_steward): + return build_nym_request(looper, vdr_wallet_new_steward) @pytest.fixture -def pool_request_json(looper, tconf, tdir, sdk_wallet_new_steward): - return build_node_request(looper, tconf, tdir, sdk_wallet_new_steward) +def pool_request_json(looper, tconf, tdir, vdr_wallet_new_steward): + return build_node_request(looper, tconf, tdir, vdr_wallet_new_steward) @pytest.fixture diff --git a/plenum/test/txn_author_agreement/acceptance/helper.py b/plenum/test/txn_author_agreement/acceptance/helper.py index 43824cf9b2..cd6e48b72b 100644 --- a/plenum/test/txn_author_agreement/acceptance/helper.py +++ b/plenum/test/txn_author_agreement/acceptance/helper.py @@ -1,19 +1,19 @@ import json -from indy.ledger import ( - append_txn_author_agreement_acceptance_to_request, sign_request -) +from plenum.test.wallet_helper import vdr_sign_request +from indy_vdr import ledger +# Look at prepare_txn_author_agreement_acceptance in ledger from vrd. 
Says to use `Request.set_txn_author_agreement_acceptance` to append to the request from plenum.common.util import randomString from plenum.test.pool_transactions.helper import ( - prepare_nym_request, prepare_new_node_data, prepare_node_request + vdr_prepare_nym_request, prepare_new_node_data, vdr_prepare_node_request ) # TODO makes sense to make more generic and move to upper level helper def build_nym_request(looper, sdk_wallet): return looper.loop.run_until_complete( - prepare_nym_request( + vdr_prepare_nym_request( sdk_wallet, named_seed=randomString(32), alias=randomString(5), @@ -30,7 +30,7 @@ def build_node_request(looper, tconf, tdir, sdk_wallet): _, steward_did = sdk_wallet node_request = looper.loop.run_until_complete( - prepare_node_request(steward_did, + vdr_prepare_node_request(steward_did, new_node_name=new_node_name, clientIp=clientIp, clientPort=clientPort, @@ -51,20 +51,18 @@ def add_taa_acceptance( taa_acceptance_mech, taa_acceptance_time ): - return looper.loop.run_until_complete( - append_txn_author_agreement_acceptance_to_request( - request_json, - text=taa_text, + req = ledger.prepare_txn_author_agreement_acceptance(text=taa_text, version=taa_version, taa_digest=None, mechanism=taa_acceptance_mech, - time=taa_acceptance_time - ) + accepted_time=taa_acceptance_time) + return looper.loop.run_until_complete(req.set_txn_author_agreement_acceptance( + request_json) ) def sign_request_dict(looper, sdk_wallet, req_dict): wallet_h, did = sdk_wallet req_json = looper.loop.run_until_complete( - sign_request(wallet_h, did, json.dumps(req_dict))) + vdr_sign_request(wallet_h, did, json.dumps(req_dict))) return json.loads(req_json) diff --git a/plenum/test/txn_author_agreement/acceptance/test_taa_acceptance_integration_validation.py b/plenum/test/txn_author_agreement/acceptance/test_taa_acceptance_integration_validation.py index 78bccbf74b..8e0388d1ce 100644 --- a/plenum/test/txn_author_agreement/acceptance/test_taa_acceptance_integration_validation.py +++ b/plenum/test/txn_author_agreement/acceptance/test_taa_acceptance_integration_validation.py @@ -1,7 +1,7 @@ import pytest from plenum.common.types import f from plenum.common.constants import TXN_PAYLOAD, TXN_PAYLOAD_METADATA -from plenum.test.helper import sdk_get_and_check_replies, sdk_sign_and_submit_req +from plenum.test.helper import vdr_get_and_check_replies, vdr_sign_and_submit_req SEC_PER_DAY = 24 * 60 * 60 @@ -17,8 +17,8 @@ def _check_taa_time_correct(taa_acceptance): def test_request_with_invalid_taa_acceptance_time(set_txn_author_agreement, add_taa_acceptance, - sdk_wallet_new_steward, - sdk_pool_handle, + vdr_wallet_new_steward, + vdr_pool_handle, looper): taa_data = set_txn_author_agreement() request_json = add_taa_acceptance( @@ -27,7 +27,7 @@ def test_request_with_invalid_taa_acceptance_time(set_txn_author_agreement, taa_a_time=taa_data.txn_time + (0 if taa_data.txn_time % SEC_PER_DAY != 0 else 1) ) - req = sdk_sign_and_submit_req(sdk_pool_handle, sdk_wallet_new_steward, request_json) - resp = sdk_get_and_check_replies(looper, [req]) + req = vdr_sign_and_submit_req(looper, vdr_pool_handle, vdr_wallet_new_steward, request_json) + resp = vdr_get_and_check_replies(looper, [req]) _check_taa_time_correct(resp[0][0]) _check_taa_time_correct(resp[0][1]["result"][TXN_PAYLOAD][TXN_PAYLOAD_METADATA]) diff --git a/plenum/test/txn_author_agreement/acceptance/test_taa_acceptance_validation.py b/plenum/test/txn_author_agreement/acceptance/test_taa_acceptance_validation.py index 2a400b4638..e620850522 100644 --- 
a/plenum/test/txn_author_agreement/acceptance/test_taa_acceptance_validation.py +++ b/plenum/test/txn_author_agreement/acceptance/test_taa_acceptance_validation.py @@ -4,14 +4,14 @@ import json from random import randint -from indy.ledger import build_txn_author_agreement_request +from indy_vdr.ledger import build_txn_author_agreement_request from plenum.common.util import get_utc_epoch from plenum.test.delayers import cDelay from plenum.common.types import f from plenum.common.constants import AML, DOMAIN_LEDGER_ID -from plenum.test.helper import sdk_sign_and_submit_req +from plenum.test.helper import vdr_sign_and_submit_req from plenum.test.stasher import delay_rules from plenum.test.txn_author_agreement.helper import calc_taa_digest, sdk_send_txn_author_agreement_disable, \ @@ -267,17 +267,17 @@ def test_taa_acceptance_valid( def test_taa_acceptance_valid_on_uncommitted( validate_taa_acceptance_func_api, - txnPoolNodeSet, looper, sdk_wallet_trustee, sdk_pool_handle, + txnPoolNodeSet, looper, vdr_wallet_trustee, vdr_pool_handle, add_taa_acceptance ): text, version = gen_random_txn_author_agreement() old_pp_seq_no = txnPoolNodeSet[0].master_replica.last_ordered_3pc[1] with delay_rules([n.nodeIbStasher for n in txnPoolNodeSet], cDelay()): - req = looper.loop.run_until_complete(build_txn_author_agreement_request(sdk_wallet_trustee[1], + req = looper.loop.run_until_complete(build_txn_author_agreement_request(vdr_wallet_trustee[1], text, version, ratification_ts=get_utc_epoch() - 600)) - req = sdk_sign_and_submit_req(sdk_pool_handle, sdk_wallet_trustee, req) + req = vdr_sign_and_submit_req(looper, vdr_pool_handle, vdr_wallet_trustee, req) def check(): assert old_pp_seq_no + 1 == txnPoolNodeSet[0].master_replica._consensus_data.preprepared[-1].pp_seq_no @@ -296,7 +296,7 @@ def test_taa_acceptance_allowed_when_disabled( validate_taa_acceptance, validation_error, set_txn_author_agreement, - add_taa_acceptance, looper, sdk_pool_handle, sdk_wallet_trustee + add_taa_acceptance, looper, vdr_pool_handle, vdr_wallet_trustee ): taa_data = set_txn_author_agreement() request_json = add_taa_acceptance( @@ -309,7 +309,7 @@ def test_taa_acceptance_allowed_when_disabled( validate_taa_acceptance(request_dict) # disable TAA acceptance - sdk_send_txn_author_agreement_disable(looper, sdk_pool_handle, sdk_wallet_trustee) + sdk_send_txn_author_agreement_disable(looper, vdr_pool_handle, vdr_wallet_trustee) # formally valid TAA acceptance request_json = add_taa_acceptance( @@ -337,7 +337,7 @@ def test_taa_acceptance_retired( validate_taa_acceptance, validation_error, turn_off_freshness_state_update, request_dict, latest_taa, - looper, sdk_pool_handle, sdk_wallet_trustee, set_txn_author_agreement + looper, vdr_pool_handle, vdr_wallet_trustee, set_txn_author_agreement ): # Create new txn author agreement set_txn_author_agreement() diff --git a/plenum/test/txn_author_agreement/conftest.py b/plenum/test/txn_author_agreement/conftest.py index 0c083aa55f..32ffe0855d 100644 --- a/plenum/test/txn_author_agreement/conftest.py +++ b/plenum/test/txn_author_agreement/conftest.py @@ -2,7 +2,7 @@ from copy import deepcopy import pytest -from indy.ledger import build_acceptance_mechanisms_request +from indy_vdr.ledger import build_acceptance_mechanisms_request from common.serializers.serialization import config_state_serializer from plenum.server.database_manager import DatabaseManager @@ -24,9 +24,9 @@ TaaData, expected_state_data, expected_data, TaaAmlData, expected_aml_data) -from plenum.test.helper import 
sdk_get_and_check_replies, get_handler_by_type_wm +from plenum.test.helper import vdr_get_and_check_replies, get_handler_by_type_wm from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request +from plenum.test.pool_transactions.helper import vdr_sign_and_send_prepared_request from .helper import ( set_txn_author_agreement as _set_txn_author_agreement, get_txn_author_agreement as _get_txn_author_agreement, @@ -75,9 +75,9 @@ def taa_aml_handler(write_manager): @pytest.fixture(scope='module') -def aml_request_kwargs(sdk_wallet_trustee): +def aml_request_kwargs(vdr_wallet_trustee): return dict( - identifier=sdk_wallet_trustee[1], + identifier=vdr_wallet_trustee[1], reqId=5, protocolVersion=CURRENT_PROTOCOL_VERSION, operation={ @@ -95,7 +95,7 @@ def aml_request_kwargs(sdk_wallet_trustee): # Note. sdk_pool_handle is necessary since it sets proper # Protocol Version for requests @pytest.fixture(scope="module") -def taa_aml_request_module(looper, aml_request_kwargs, sdk_pool_handle): +def taa_aml_request_module(looper, aml_request_kwargs, vdr_pool_handle): res = looper.loop.run_until_complete( build_acceptance_mechanisms_request( aml_request_kwargs['identifier'], @@ -108,7 +108,7 @@ def taa_aml_request_module(looper, aml_request_kwargs, sdk_pool_handle): @pytest.fixture(scope="function") -def taa_aml_request(looper, aml_request_kwargs, sdk_pool_handle): +def taa_aml_request(looper, aml_request_kwargs, vdr_pool_handle): aml_request_kwargs = deepcopy(aml_request_kwargs) aml_request_kwargs['operation'][AML_VERSION] = randomString() aml_request_kwargs['operation'][AML_CONTEXT] = randomString() @@ -126,23 +126,23 @@ def taa_aml_request(looper, aml_request_kwargs, sdk_pool_handle): @pytest.fixture(scope="module") def set_txn_author_agreement_aml( looper, txnPoolNodeSet, taa_aml_request_module, - sdk_pool_handle, sdk_wallet_trustee + vdr_pool_handle, vdr_wallet_trustee ): - req = sdk_sign_and_send_prepared_request( - looper, sdk_wallet_trustee, sdk_pool_handle, taa_aml_request_module) - return sdk_get_and_check_replies(looper, [req])[0] + req = vdr_sign_and_send_prepared_request( + looper, vdr_wallet_trustee, vdr_pool_handle, taa_aml_request_module) + return vdr_get_and_check_replies(looper, [req])[0] @pytest.fixture(scope='module') def set_txn_author_agreement( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_trustee + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_trustee ): def wrapped(text=None, version=None, retired=None, ratified=None): random_taa = gen_random_txn_author_agreement() text = random_taa[0] if text is None else text version = random_taa[1] if version is None else version ratified = get_utc_epoch() - 600 if ratified is None else ratified - res = _set_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, text, version, ratified, retired) + res = _set_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, text, version, ratified, retired) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) return res @@ -151,11 +151,11 @@ def wrapped(text=None, version=None, retired=None, ratified=None): @pytest.fixture(scope='module') def get_txn_author_agreement( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client ): def wrapped(digest=None, version=None, timestamp=None): return _get_txn_author_agreement( - looper, sdk_pool_handle, sdk_wallet_client, + looper, vdr_pool_handle, vdr_wallet_client, 
digest=digest, version=version, timestamp=timestamp ) diff --git a/plenum/test/txn_author_agreement/helper.py b/plenum/test/txn_author_agreement/helper.py index 37b82a8fcf..7ae4c6ec0f 100644 --- a/plenum/test/txn_author_agreement/helper.py +++ b/plenum/test/txn_author_agreement/helper.py @@ -3,7 +3,7 @@ from _sha256 import sha256 import base58 -from indy.ledger import build_txn_author_agreement_request, build_get_txn_author_agreement_request, \ +from indy_vdr.ledger import build_txn_author_agreement_request, build_get_txn_author_agreement_request, \ build_get_acceptance_mechanisms_request, build_disable_all_txn_author_agreements_request from typing import NamedTuple, Dict, Optional @@ -21,7 +21,7 @@ from plenum.server.request_handlers.static_taa_helper import StaticTAAHelper from plenum.server.request_handlers.txn_author_agreement_aml_handler import TxnAuthorAgreementAmlHandler from plenum.server.request_managers.write_request_manager import WriteRequestManager -from plenum.test.helper import sdk_sign_and_submit_req, sdk_get_and_check_replies, sdk_sign_and_submit_op +from plenum.test.helper import vdr_sign_and_submit_req, vdr_get_and_check_replies, vdr_sign_and_submit_op from state.pruning_state import PruningState TaaData = NamedTuple("TaaData", [ @@ -47,14 +47,14 @@ def sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet, version: retired: Optional[int] = None): req = looper.loop.run_until_complete(build_txn_author_agreement_request(sdk_wallet[1], text, version, ratified, retired)) - rep = sdk_sign_and_submit_req(sdk_pool_handle, sdk_wallet, req) - return sdk_get_and_check_replies(looper, [rep])[0] + rep = vdr_sign_and_submit_req(looper, sdk_pool_handle, sdk_wallet, req) + return vdr_get_and_check_replies(looper, [rep])[0] def sdk_send_txn_author_agreement_disable(looper, sdk_pool_handle, sdk_wallet): req = looper.loop.run_until_complete(build_disable_all_txn_author_agreements_request(sdk_wallet[1])) - rep = sdk_sign_and_submit_req(sdk_pool_handle, sdk_wallet, req) - return sdk_get_and_check_replies(looper, [rep])[0] + rep = vdr_sign_and_submit_req(looper, sdk_pool_handle, sdk_wallet, req) + return vdr_get_and_check_replies(looper, [rep])[0] def set_txn_author_agreement( @@ -87,16 +87,16 @@ def sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet, if timestamp is not None: params['timestamp'] = timestamp req = looper.loop.run_until_complete(build_get_txn_author_agreement_request(sdk_wallet[1], json.dumps(params))) - rep = sdk_sign_and_submit_req(sdk_pool_handle, sdk_wallet, req) - return sdk_get_and_check_replies(looper, [rep])[0] + rep = vdr_sign_and_submit_req(looper, sdk_pool_handle, sdk_wallet, req) + return vdr_get_and_check_replies(looper, [rep])[0] def sdk_get_taa_aml(looper, sdk_pool_handle, sdk_wallet, version: Optional[str] = None, timestamp: Optional[int] = None): req = looper.loop.run_until_complete(build_get_acceptance_mechanisms_request(sdk_wallet[1], timestamp, version)) - rep = sdk_sign_and_submit_req(sdk_pool_handle, sdk_wallet, req) - return sdk_get_and_check_replies(looper, [rep])[0] + rep = vdr_sign_and_submit_req(looper, sdk_pool_handle, sdk_wallet, req) + return vdr_get_and_check_replies(looper, [rep])[0] def get_txn_author_agreement( diff --git a/plenum/test/txn_author_agreement/test_config_req_handler_taa_utils.py b/plenum/test/txn_author_agreement/test_config_req_handler_taa_utils.py index dbbdda45ee..b999712253 100644 --- a/plenum/test/txn_author_agreement/test_config_req_handler_taa_utils.py +++ 
b/plenum/test/txn_author_agreement/test_config_req_handler_taa_utils.py @@ -31,12 +31,12 @@ def test_state_path_taa_aml_version(): assert WriteRequestManager._state_path_taa_aml_version('some_version') == b'3:v:some_version' -def test_is_trustee(txnPoolNodeSet, sdk_wallet_trustee, sdk_wallet_steward, sdk_wallet_client): +def test_is_trustee(txnPoolNodeSet, vdr_wallet_trustee, vdr_wallet_steward, vdr_wallet_client): aml_req_handler = get_aml_req_handler(txnPoolNodeSet[0]) state = aml_req_handler.database_manager.get_database(DOMAIN_LEDGER_ID).state - assert is_trustee(state, sdk_wallet_trustee[1]) - assert not is_trustee(state, sdk_wallet_steward[1]) - assert not is_trustee(state, sdk_wallet_client[1]) + assert is_trustee(state, vdr_wallet_trustee[1]) + assert not is_trustee(state, vdr_wallet_steward[1]) + assert not is_trustee(state, vdr_wallet_client[1]) def test_add_txn_author_agreement(taa_handler, write_manager, taa_input_data, diff --git a/plenum/test/txn_author_agreement/test_get_empty_txn_author_agreement.py b/plenum/test/txn_author_agreement/test_get_empty_txn_author_agreement.py index 310aadda0e..71a324138b 100644 --- a/plenum/test/txn_author_agreement/test_get_empty_txn_author_agreement.py +++ b/plenum/test/txn_author_agreement/test_get_empty_txn_author_agreement.py @@ -1,5 +1,5 @@ import pytest -from indy.error import CommonInvalidParam3 +from indy_vdr.error import VdrErrorCode, VdrError from plenum.common.constants import REPLY, CONFIG_LEDGER_ID from plenum.common.exceptions import RequestNackedException, CommonSdkIOException @@ -43,8 +43,8 @@ def nodeSetWithoutTaa(request, nodeSetWithoutTaaAlwaysResponding): ({'timestamp': TIMESTAMP_NONE}, '2:latest') ]) def test_get_txn_author_agreement_works_on_clear_state(params, state_key, looper, nodeSetWithoutTaa, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, **params)[1] + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_client, **params)[1] assert reply['op'] == REPLY result = reply['result'] @@ -59,6 +59,7 @@ def test_get_txn_author_agreement_works_on_clear_state(params, state_key, looper {'digest': 'some_digest', 'version': 'some_version', 'timestamp': 374273} ]) def test_get_txn_author_agreement_cannot_have_more_than_one_parameter(params, looper, nodeSetWithoutTaa, - sdk_pool_handle, sdk_wallet_client): - with pytest.raises(CommonInvalidParam3) as e: - sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, **params) + vdr_pool_handle, vdr_wallet_client): + with pytest.raises(VdrError) as e: + sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_client, **params) + assert e.value.code == VdrErrorCode.UNEXPECTED diff --git a/plenum/test/txn_author_agreement/test_get_taa_aml.py b/plenum/test/txn_author_agreement/test_get_taa_aml.py index b998f6577d..ca518a6736 100644 --- a/plenum/test/txn_author_agreement/test_get_taa_aml.py +++ b/plenum/test/txn_author_agreement/test_get_taa_aml.py @@ -3,11 +3,11 @@ from typing import Optional import pytest -from indy.ledger import build_acceptance_mechanisms_request +from indy_vdr.ledger import build_acceptance_mechanisms_request from plenum.common.exceptions import RequestNackedException from plenum.common.types import OPERATION, f -from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request +from plenum.test.pool_transactions.helper import vdr_sign_and_send_prepared_request from common.serializers.json_serializer import
JsonSerializer @@ -16,7 +16,7 @@ GET_TXN_AUTHOR_AGREEMENT_AML, CURRENT_PROTOCOL_VERSION, TXN_AUTHOR_AGREEMENT_RETIREMENT_TS, TXN_AUTHOR_AGREEMENT_DIGEST from plenum.common.util import randomString from plenum.test.delayers import req_delay -from plenum.test.helper import sdk_get_and_check_replies +from plenum.test.helper import vdr_get_and_check_replies from plenum.test.stasher import delay_rules from plenum.test.txn_author_agreement.helper import check_state_proof, sdk_get_taa_aml, sdk_send_txn_author_agreement @@ -36,25 +36,25 @@ def send_aml_request(looper, sdk_wallet_trustee, sdk_pool_handle, version, aml, sdk_wallet_trustee[1], aml, version, context)) - req = sdk_sign_and_send_prepared_request(looper, sdk_wallet_trustee, sdk_pool_handle, req) - return sdk_get_and_check_replies(looper, [req])[0] + req = vdr_sign_and_send_prepared_request(looper, sdk_wallet_trustee, sdk_pool_handle, req) + return vdr_get_and_check_replies(looper, [req])[0] @pytest.fixture(scope='module') -def nodeSetWithTaaAlwaysResponding(txnPoolNodeSet, looper, sdk_pool_handle, - sdk_wallet_trustee): +def nodeSetWithTaaAlwaysResponding(txnPoolNodeSet, looper, vdr_pool_handle, + vdr_wallet_trustee): global TIMESTAMP_V1, TIMESTAMP_V2 # Force signing empty config state txnPoolNodeSet[0].master_replica._ordering_service._do_send_3pc_batch(ledger_id=CONFIG_LEDGER_ID) looper.runFor(3) # Make sure we have long enough gap between updates - reply = send_aml_request(looper, sdk_wallet_trustee, sdk_pool_handle, version=V1, aml=json.dumps(AML1), + reply = send_aml_request(looper, vdr_wallet_trustee, vdr_pool_handle, version=V1, aml=json.dumps(AML1), context=CONTEXT1) TIMESTAMP_V1 = reply[1]['result'][TXN_METADATA][TXN_METADATA_TIME] looper.runFor(3) # Make sure we have long enough gap between updates - reply = send_aml_request(looper, sdk_wallet_trustee, sdk_pool_handle, version=V2, aml=json.dumps(AML2), + reply = send_aml_request(looper, vdr_wallet_trustee, vdr_pool_handle, version=V2, aml=json.dumps(AML2), context=CONTEXT2) TIMESTAMP_V2 = reply[1]['result'][TXN_METADATA][TXN_METADATA_TIME] @@ -98,32 +98,32 @@ def taa_aml_value(result, version, aml, context): }) -def test_get_taa_aml_static_validation_fails(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): +def test_get_taa_aml_static_validation_fails(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): req = { OPERATION: { TXN_TYPE: GET_TXN_AUTHOR_AGREEMENT_AML, 'timestamp': randint(1, 2147483647), 'version': randomString() }, - f.IDENTIFIER.nm: sdk_wallet_client[1], + f.IDENTIFIER.nm: vdr_wallet_client[1], f.REQ_ID.nm: randint(1, 2147483647), f.PROTOCOL_VERSION.nm: CURRENT_PROTOCOL_VERSION } - rep = sdk_sign_and_send_prepared_request(looper, sdk_wallet_client, sdk_pool_handle, json.dumps(req)) + rep = vdr_sign_and_send_prepared_request(looper, vdr_wallet_client, vdr_pool_handle, json.dumps(req)) with pytest.raises(RequestNackedException) as e: - sdk_get_and_check_replies(looper, [rep]) + vdr_get_and_check_replies(looper, [rep]) e.match('cannot be used in GET_TXN_AUTHOR_AGREEMENT_AML request together') -def test_get_taa_aml_works_on_clear_state(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_taa_aml(looper, sdk_pool_handle, sdk_wallet_client)[1] +def test_get_taa_aml_works_on_clear_state(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_taa_aml(looper, vdr_pool_handle, vdr_wallet_client)[1] assert reply['op'] == REPLY assert reply['result']['data'] is None def 
test_get_taa_aml_returns_latest_taa_by_default(looper, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_taa_aml(looper, sdk_pool_handle, sdk_wallet_client)[1] + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_taa_aml(looper, vdr_pool_handle, vdr_wallet_client)[1] assert reply['op'] == REPLY result = reply['result'] @@ -132,8 +132,8 @@ def test_get_taa_aml_returns_latest_taa_by_default(looper, nodeSetWithTaa, def test_get_taa_aml_can_return_taa_for_old_version(looper, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_taa_aml(looper, sdk_pool_handle, sdk_wallet_client, version=V1)[1] + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_taa_aml(looper, vdr_pool_handle, vdr_wallet_client, version=V1)[1] assert reply['op'] == REPLY @@ -143,8 +143,8 @@ def test_get_taa_aml_can_return_taa_for_old_version(looper, nodeSetWithTaa, def test_get_taa_aml_can_return_taa_for_current_version(looper, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_taa_aml(looper, sdk_pool_handle, sdk_wallet_client, version=V2)[1] + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_taa_aml(looper, vdr_pool_handle, vdr_wallet_client, version=V2)[1] assert reply['op'] == REPLY @@ -154,9 +154,9 @@ def test_get_taa_aml_can_return_taa_for_current_version(looper, nodeSetWithTaa, def test_get_taa_aml_doesnt_return_taa_for_nonexistent_version(looper, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): invalid_version = randomString(16) - reply = sdk_get_taa_aml(looper, sdk_pool_handle, sdk_wallet_client, + reply = sdk_get_taa_aml(looper, vdr_pool_handle, vdr_wallet_client, version=invalid_version)[1] assert reply['op'] == REPLY @@ -166,8 +166,8 @@ def test_get_taa_aml_doesnt_return_taa_for_nonexistent_version(looper, nodeSetWi def test_get_taa_aml_can_return_taa_aml_for_old_ts(looper, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_taa_aml(looper, sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_taa_aml(looper, vdr_pool_handle, vdr_wallet_client, timestamp=TIMESTAMP_V2 - 2)[1] assert reply['op'] == REPLY @@ -177,8 +177,8 @@ def test_get_taa_aml_can_return_taa_aml_for_old_ts(looper, nodeSetWithTaa, def test_get_taa_aml_can_return_taa_aml_for_fresh_ts(looper, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_taa_aml(looper, sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_taa_aml(looper, vdr_pool_handle, vdr_wallet_client, timestamp=TIMESTAMP_V2 + 2)[1] assert reply['op'] == REPLY @@ -189,8 +189,8 @@ def test_get_taa_aml_can_return_taa_aml_for_fresh_ts(looper, nodeSetWithTaa, # TODO: Change to nodeSetWithTaa when SDK will support this case def test_get_taa_aml_doesnt_return_taa_aml_when_it_didnt_exist(looper, nodeSetWithTaaAlwaysResponding, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_taa_aml(looper, sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_taa_aml(looper, vdr_pool_handle, vdr_wallet_client, timestamp=TIMESTAMP_V1 - 3)[1] assert reply['op'] == REPLY diff --git a/plenum/test/txn_author_agreement/test_get_txn_author_agreement.py b/plenum/test/txn_author_agreement/test_get_txn_author_agreement.py index b31feaf6ae..b5a1338973 100644 --- a/plenum/test/txn_author_agreement/test_get_txn_author_agreement.py +++ b/plenum/test/txn_author_agreement/test_get_txn_author_agreement.py @@ -26,22 +26,22 @@ 
@pytest.fixture(scope='module') -def nodeSetWithTaaAlwaysResponding(txnPoolNodeSet, set_txn_author_agreement_aml, looper, sdk_pool_handle, - sdk_wallet_trustee): +def nodeSetWithTaaAlwaysResponding(txnPoolNodeSet, set_txn_author_agreement_aml, looper, vdr_pool_handle, + vdr_wallet_trustee): global RATIFIED_V1, RATIFIED_V2 global TIMESTAMP_V1, TIMESTAMP_V2 looper.runFor(3) # Make sure we have long enough gap between updates RATIFIED_V1 = get_utc_epoch() - 30 - reply = sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, V1, TEXT_V1, RATIFIED_V1) + reply = sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, V1, TEXT_V1, RATIFIED_V1) TIMESTAMP_V1 = reply[1]['result'][TXN_METADATA][TXN_METADATA_TIME] looper.runFor(3) # Make sure we have long enough gap between updates RATIFIED_V2 = get_utc_epoch() - 30 - reply = sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, V2, TEXT_V2, RATIFIED_V2) + reply = sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, V2, TEXT_V2, RATIFIED_V2) TIMESTAMP_V2 = reply[1]['result'][TXN_METADATA][TXN_METADATA_TIME] - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, V1, retired=TIMESTAMP_V1) + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, V1, retired=TIMESTAMP_V1) return txnPoolNodeSet @@ -74,8 +74,8 @@ def taa_value(result, text, version, digest, retired=None, ratified=None): def test_get_txn_author_agreement_returns_latest_taa_by_default(looper, set_txn_author_agreement_aml, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client)[1] + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_client)[1] assert reply['op'] == REPLY result = reply['result'] @@ -86,8 +86,8 @@ def test_get_txn_author_agreement_returns_latest_taa_by_default(looper, set_txn_ def test_get_txn_author_agreement_can_return_taa_for_old_version(looper, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_client, version=V1)[1] assert reply['op'] == REPLY @@ -99,8 +99,8 @@ def test_get_txn_author_agreement_can_return_taa_for_old_version(looper, nodeSet def test_get_txn_author_agreement_can_return_taa_for_current_version(looper, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_client, version=V2)[1] assert reply['op'] == REPLY @@ -111,9 +111,9 @@ def test_get_txn_author_agreement_can_return_taa_for_current_version(looper, nod def test_get_txn_author_agreement_doesnt_return_taa_for_nonexistent_version(looper, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): invalid_version = randomString(16) - reply = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, + reply = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_client, version=invalid_version)[1] assert reply['op'] == REPLY @@ -123,8 +123,8 @@ def test_get_txn_author_agreement_doesnt_return_taa_for_nonexistent_version(loop def test_get_txn_author_agreement_can_return_taa_for_old_digest(looper, 
nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_client, digest=DIGEST_V1)[1] assert reply['op'] == REPLY @@ -139,8 +139,8 @@ def test_get_txn_author_agreement_can_return_taa_for_old_digest(looper, nodeSetW def test_get_txn_author_agreement_can_return_taa_for_current_digest(looper, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_client, digest=DIGEST_V2)[1] assert reply['op'] == REPLY @@ -153,9 +153,9 @@ def test_get_txn_author_agreement_can_return_taa_for_current_digest(looper, node def test_get_txn_author_agreement_doesnt_return_taa_for_nonexistent_digest(looper, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): invalid_digest = randomString(16) - reply = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, + reply = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_client, digest=invalid_digest)[1] assert reply['op'] == REPLY @@ -165,8 +165,8 @@ def test_get_txn_author_agreement_doesnt_return_taa_for_nonexistent_digest(loope def test_get_txn_author_agreement_can_return_taa_for_old_ts(looper, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_client, timestamp=TIMESTAMP_V2 - 2)[1] assert reply['op'] == REPLY @@ -178,8 +178,8 @@ def test_get_txn_author_agreement_can_return_taa_for_old_ts(looper, nodeSetWithT def test_get_txn_author_agreement_can_return_taa_for_fresh_ts(looper, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_client, timestamp=TIMESTAMP_V2 + 2)[1] assert reply['op'] == REPLY @@ -191,8 +191,8 @@ def test_get_txn_author_agreement_can_return_taa_for_fresh_ts(looper, nodeSetWit def test_get_txn_author_agreement_doesnt_return_taa_when_it_didnt_exist(looper, nodeSetWithTaa, - sdk_pool_handle, sdk_wallet_client): - reply = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client): + reply = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_client, timestamp=TIMESTAMP_V1 - 2)[1] assert reply['op'] == REPLY diff --git a/plenum/test/txn_author_agreement/test_taa_aml_integration.py b/plenum/test/txn_author_agreement/test_taa_aml_integration.py index 954037e2c6..e246a133a9 100644 --- a/plenum/test/txn_author_agreement/test_taa_aml_integration.py +++ b/plenum/test/txn_author_agreement/test_taa_aml_integration.py @@ -1,47 +1,47 @@ import json import pytest -from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request +from plenum.test.pool_transactions.helper import vdr_sign_and_send_prepared_request from plenum.common.exceptions import RequestNackedException, RequestRejectedException -from plenum.test.helper import sdk_get_and_check_replies +from plenum.test.helper import vdr_get_and_check_replies from 
plenum.common.constants import AML, AML_CONTEXT -def test_taa_acceptance_writes(looper, taa_aml_request, sdk_pool_handle, sdk_wallet_trustee): - req = sdk_sign_and_send_prepared_request(looper, sdk_wallet_trustee, sdk_pool_handle, taa_aml_request) - sdk_get_and_check_replies(looper, [req]) +def test_taa_acceptance_writes(looper, taa_aml_request, vdr_pool_handle, vdr_wallet_trustee): + req = vdr_sign_and_send_prepared_request(looper, vdr_wallet_trustee, vdr_pool_handle, taa_aml_request) + vdr_get_and_check_replies(looper, [req]) -def test_taa_acceptance_writes_module_static(looper, taa_aml_request, sdk_pool_handle, sdk_wallet_trustee): +def test_taa_acceptance_writes_module_static(looper, taa_aml_request, vdr_pool_handle, vdr_wallet_trustee): taa_aml_request = json.loads(taa_aml_request) taa_aml_request['operation'][AML] = {} taa_aml_request = json.dumps(taa_aml_request) - req = sdk_sign_and_send_prepared_request(looper, sdk_wallet_trustee, sdk_pool_handle, taa_aml_request) + req = vdr_sign_and_send_prepared_request(looper, vdr_wallet_trustee, vdr_pool_handle, taa_aml_request) with pytest.raises(RequestNackedException) as e: - sdk_get_and_check_replies(looper, [req]) + vdr_get_and_check_replies(looper, [req]) assert e.match('TXN_AUTHOR_AGREEMENT_AML request must contain at least one acceptance mechanism') -def test_taa_acceptance_writes_module_dynamic(looper, taa_aml_request, sdk_pool_handle, sdk_wallet_trustee): - req = sdk_sign_and_send_prepared_request(looper, sdk_wallet_trustee, sdk_pool_handle, taa_aml_request) - sdk_get_and_check_replies(looper, [req]) +def test_taa_acceptance_writes_module_dynamic(looper, taa_aml_request, vdr_pool_handle, vdr_wallet_trustee): + req = vdr_sign_and_send_prepared_request(looper, vdr_wallet_trustee, vdr_pool_handle, taa_aml_request) + vdr_get_and_check_replies(looper, [req]) taa_aml_request = json.loads(taa_aml_request) taa_aml_request['reqId'] = 111 taa_aml_request = json.dumps(taa_aml_request) - req = sdk_sign_and_send_prepared_request(looper, sdk_wallet_trustee, sdk_pool_handle, taa_aml_request) + req = vdr_sign_and_send_prepared_request(looper, vdr_wallet_trustee, vdr_pool_handle, taa_aml_request) with pytest.raises(RequestRejectedException) as e: - sdk_get_and_check_replies(looper, [req]) + vdr_get_and_check_replies(looper, [req]) assert e.match('Version of TAA AML must be unique and it cannot be modified') -def test_taa_aml_optional_description(looper, taa_aml_request, sdk_pool_handle, sdk_wallet_trustee): +def test_taa_aml_optional_description(looper, taa_aml_request, vdr_pool_handle, vdr_wallet_trustee): taa_aml_request = json.loads(taa_aml_request) del taa_aml_request['operation'][AML_CONTEXT] taa_aml_request = json.dumps(taa_aml_request) - req = sdk_sign_and_send_prepared_request(looper, sdk_wallet_trustee, sdk_pool_handle, taa_aml_request) - sdk_get_and_check_replies(looper, [req]) + req = vdr_sign_and_send_prepared_request(looper, vdr_wallet_trustee, vdr_pool_handle, taa_aml_request) + vdr_get_and_check_replies(looper, [req]) diff --git a/plenum/test/txn_author_agreement/test_txn_author_agreement.py b/plenum/test/txn_author_agreement/test_txn_author_agreement.py index ff5e18ed8c..a07c05cb42 100644 --- a/plenum/test/txn_author_agreement/test_txn_author_agreement.py +++ b/plenum/test/txn_author_agreement/test_txn_author_agreement.py @@ -1,7 +1,7 @@ import pytest import json -from indy.ledger import build_txn_author_agreement_request +from indy_vdr.ledger import build_txn_author_agreement_request from plenum.common.constants import REPLY, 
OP_FIELD_NAME, DATA, TXN_AUTHOR_AGREEMENT_RETIREMENT_TS, \ TXN_AUTHOR_AGREEMENT_RATIFICATION_TS, TXN_AUTHOR_AGREEMENT_VERSION, TXN_AUTHOR_AGREEMENT_TEXT @@ -9,8 +9,8 @@ from plenum.common.types import OPERATION from plenum.common.util import randomString, get_utc_epoch -from plenum.test.helper import sdk_get_and_check_replies -from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request +from plenum.test.helper import vdr_get_and_check_replies +from plenum.test.pool_transactions.helper import vdr_sign_and_send_prepared_request from .helper import sdk_send_txn_author_agreement, sdk_get_txn_author_agreement @@ -37,28 +37,28 @@ def test_send_empty_txn_author_agreement_succeeds( def test_send_invalid_txn_author_agreement_fails( - looper, set_txn_author_agreement_aml, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_trustee, random_taa + looper, set_txn_author_agreement_aml, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_trustee, random_taa ): req = looper.loop.run_until_complete( - build_txn_author_agreement_request(sdk_wallet_trustee[1], *random_taa) + build_txn_author_agreement_request(vdr_wallet_trustee[1], *random_taa) ) req = json.loads(req) req[OPERATION]['text'] = 42 - rep = sdk_sign_and_send_prepared_request(looper, sdk_wallet_trustee, sdk_pool_handle, json.dumps(req)) + rep = vdr_sign_and_send_prepared_request(looper, vdr_wallet_trustee, vdr_pool_handle, json.dumps(req)) with pytest.raises(RequestNackedException): - sdk_get_and_check_replies(looper, [rep]) + vdr_get_and_check_replies(looper, [rep]) -def test_create_txn_author_agreement_succeeds(looper, set_txn_author_agreement_aml, sdk_pool_handle, sdk_wallet_trustee): +def test_create_txn_author_agreement_succeeds(looper, set_txn_author_agreement_aml, vdr_pool_handle, vdr_wallet_trustee): # Write random TAA version, text, ratified = randomString(16), randomString(1024), get_utc_epoch() - 600 - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version, text=text, ratified=ratified) # Make sure TAA successfully written as latest TAA - rep = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee)[1] + rep = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee)[1] assert rep[OP_FIELD_NAME] == REPLY taa = rep['result'][DATA] assert taa[TXN_AUTHOR_AGREEMENT_VERSION] == version @@ -67,53 +67,53 @@ def test_create_txn_author_agreement_succeeds(looper, set_txn_author_agreement_a assert TXN_AUTHOR_AGREEMENT_RETIREMENT_TS not in taa # Make sure TAA also available using version - rep = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, version=version)[1] + rep = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version)[1] assert rep[OP_FIELD_NAME] == REPLY assert rep['result'][DATA] == taa def test_create_txn_author_agreement_without_text_fails(looper, set_txn_author_agreement_aml, - sdk_pool_handle, sdk_wallet_trustee): + vdr_pool_handle, vdr_wallet_trustee): with pytest.raises(RequestRejectedException): - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=randomString(16), ratified=get_utc_epoch() - 600) def test_create_txn_author_agreement_without_ratified_fails(looper, set_txn_author_agreement_aml, - sdk_pool_handle, sdk_wallet_trustee): + vdr_pool_handle, vdr_wallet_trustee): with 
pytest.raises(RequestRejectedException): - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=randomString(16), text=randomString(1024)) def test_create_txn_author_agreement_with_ratified_from_future_fails(looper, set_txn_author_agreement_aml, - sdk_pool_handle, sdk_wallet_trustee): + vdr_pool_handle, vdr_wallet_trustee): with pytest.raises(RequestRejectedException): - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=randomString(16), text=randomString(1024), ratified=get_utc_epoch() + 600) def test_create_txn_author_agreement_with_milliseconds_ratified_fails(looper, set_txn_author_agreement_aml, - sdk_pool_handle, sdk_wallet_trustee): + vdr_pool_handle, vdr_wallet_trustee): ratified = get_utc_epoch() * 1000 with pytest.raises(RequestNackedException, match="{} = {} is out of range.".format(TXN_AUTHOR_AGREEMENT_RATIFICATION_TS, ratified)): - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=randomString(16), text=randomString(1024), ratified=ratified) def test_create_txn_author_agreement_with_milliseconds_retired_fails(looper, set_txn_author_agreement_aml, - sdk_pool_handle, sdk_wallet_trustee): + vdr_pool_handle, vdr_wallet_trustee): retired = get_utc_epoch() * 1000 with pytest.raises(RequestNackedException, match="{} = {} is out of range.".format(TXN_AUTHOR_AGREEMENT_RETIREMENT_TS, retired)): - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=randomString(16), text=randomString(1024), ratified=get_utc_epoch() - 600, @@ -122,10 +122,10 @@ def test_create_txn_author_agreement_with_milliseconds_retired_fails(looper, set @pytest.mark.parametrize('retired_offset', [-600, 600]) def test_create_txn_author_agreement_with_retired_date_fails(looper, set_txn_author_agreement_aml, - sdk_pool_handle, sdk_wallet_trustee, + vdr_pool_handle, vdr_wallet_trustee, retired_offset): with pytest.raises(RequestRejectedException): - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=randomString(16), text=randomString(1024), ratified=get_utc_epoch() - 600, @@ -133,17 +133,17 @@ def test_create_txn_author_agreement_with_retired_date_fails(looper, set_txn_aut def test_txn_author_agreement_update_text_fails(looper, set_txn_author_agreement_aml, - sdk_pool_handle, sdk_wallet_trustee): + vdr_pool_handle, vdr_wallet_trustee): # Write random TAA version, text, ratified = randomString(16), randomString(1024), get_utc_epoch() - 600 - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version, text=text, ratified=ratified) # Try to update text with pytest.raises(RequestRejectedException): - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version, text=randomString(256), ratified=ratified) @@ -151,17 +151,17 @@ def test_txn_author_agreement_update_text_fails(looper, set_txn_author_agreement @pytest.mark.parametrize('ratified_offset', [-600, 600]) def 
test_txn_author_agreement_update_ratification_fails(looper, set_txn_author_agreement_aml, - sdk_pool_handle, sdk_wallet_trustee, ratified_offset): + vdr_pool_handle, vdr_wallet_trustee, ratified_offset): # Write random TAA version, text, ratified = randomString(16), randomString(1024), get_utc_epoch() - 600 - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version, text=text, ratified=ratified) # Try to update ratification timestamp with pytest.raises(RequestRejectedException): - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version, text=text, ratified=ratified + ratified_offset) @@ -171,28 +171,28 @@ def test_txn_author_agreement_update_ratification_fails(looper, set_txn_author_a @pytest.mark.parametrize('pass_text', [True, False]) @pytest.mark.parametrize('pass_ratification', [True, False]) def test_txn_author_agreement_retire_non_latest(looper, set_txn_author_agreement_aml, - sdk_pool_handle, sdk_wallet_trustee, + vdr_pool_handle, vdr_wallet_trustee, retired_offset, pass_text, pass_ratification): version_1, text_1, ratified_1 = randomString(16), randomString(1024), get_utc_epoch() - 600 - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version_1, text=text_1, ratified=ratified_1) version_2, text_2, ratified_2 = randomString(16), randomString(1024), get_utc_epoch() - 600 - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version_2, text=text_2, ratified=ratified_2) retired_1 = get_utc_epoch() + retired_offset - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, version=version_1, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version_1, text=text_1 if pass_text else None, ratified=ratified_1 if pass_ratification else None, retired=retired_1) # Make sure old TAA is retired - rep = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, version=version_1)[1] + rep = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version_1)[1] assert rep[OP_FIELD_NAME] == REPLY taa = rep['result'][DATA] assert taa[TXN_AUTHOR_AGREEMENT_VERSION] == version_1 @@ -201,7 +201,7 @@ def test_txn_author_agreement_retire_non_latest(looper, set_txn_author_agreement assert taa[TXN_AUTHOR_AGREEMENT_RETIREMENT_TS] == retired_1 # Make sure new TAA is not retired - rep = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, version=version_2)[1] + rep = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version_2)[1] assert rep[OP_FIELD_NAME] == REPLY taa = rep['result'][DATA] assert taa[TXN_AUTHOR_AGREEMENT_VERSION] == version_2 @@ -210,7 +210,7 @@ def test_txn_author_agreement_retire_non_latest(looper, set_txn_author_agreement assert TXN_AUTHOR_AGREEMENT_RETIREMENT_TS not in taa # Make sure latest TAA is not changed - rep = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, version=version_2)[1] + rep = sdk_get_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version_2)[1] assert rep[OP_FIELD_NAME] == REPLY assert taa == rep['result'][DATA] @@ -219,18 +219,18 @@ def 
test_txn_author_agreement_retire_non_latest(looper, set_txn_author_agreement @pytest.mark.parametrize('pass_text', [True, False]) @pytest.mark.parametrize('pass_ratification', [True, False]) def test_txn_author_agreement_retire_latest_fails(looper, set_txn_author_agreement_aml, - sdk_pool_handle, sdk_wallet_trustee, + vdr_pool_handle, vdr_wallet_trustee, retired_offset, pass_text, pass_ratification): # Write random TAA version, text, ratified = randomString(16), randomString(1024), get_utc_epoch() - 600 - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version, text=text, ratified=ratified) # Make sure we cannot retire it with pytest.raises(RequestRejectedException): - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version, text=text if pass_text else None, ratified=ratified if pass_ratification else None, @@ -291,8 +291,8 @@ def text_txn_author_agreement_can_change_retirement(looper, set_txn_author_agree def test_send_valid_txn_author_agreement_without_enough_privileges_fails( looper, set_txn_author_agreement_aml, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, random_taa + vdr_pool_handle, vdr_wallet_steward, random_taa ): with pytest.raises(RequestRejectedException): text, version = random_taa - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_steward, version, text) + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_steward, version, text) diff --git a/plenum/test/txn_author_agreement/test_txn_author_agreement_disable.py b/plenum/test/txn_author_agreement/test_txn_author_agreement_disable.py index 2f1a90c2fd..677fd12c21 100644 --- a/plenum/test/txn_author_agreement/test_txn_author_agreement_disable.py +++ b/plenum/test/txn_author_agreement/test_txn_author_agreement_disable.py @@ -10,86 +10,86 @@ def test_send_valid_txn_author_agreement_succeeds_and_disable( set_txn_author_agreement_aml, set_txn_author_agreement, get_txn_author_agreement, - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_trustee + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_trustee ): ratified = get_utc_epoch() - 600 taa1 = set_txn_author_agreement(ratified=ratified) taa2 = set_txn_author_agreement() retirement_ts = 5 taa1 = set_txn_author_agreement(taa1.text, taa1.version, retirement_ts, ratified=ratified) - sdk_send_txn_author_agreement_disable(looper, sdk_pool_handle, sdk_wallet_trustee) + sdk_send_txn_author_agreement_disable(looper, vdr_pool_handle, vdr_wallet_trustee) reply = sdk_get_txn_author_agreement( - looper, sdk_pool_handle, sdk_wallet_trustee)[1] + looper, vdr_pool_handle, vdr_wallet_trustee)[1] assert reply[f.RESULT.nm][DATA] is None reply = sdk_get_txn_author_agreement( - looper, sdk_pool_handle, sdk_wallet_trustee, version=taa2.version)[1] + looper, vdr_pool_handle, vdr_wallet_trustee, version=taa2.version)[1] result = reply[f.RESULT.nm] assert result[DATA][TXN_AUTHOR_AGREEMENT_RETIREMENT_TS] == result[TXN_TIME] reply = sdk_get_txn_author_agreement( - looper, sdk_pool_handle, sdk_wallet_trustee, version=taa1.version)[1] + looper, vdr_pool_handle, vdr_wallet_trustee, version=taa1.version)[1] result = reply[f.RESULT.nm] assert result[DATA][TXN_AUTHOR_AGREEMENT_RETIREMENT_TS] == retirement_ts def test_send_txn_author_agreement_disable_twice( set_txn_author_agreement_aml, set_txn_author_agreement, get_txn_author_agreement, - looper, 
txnPoolNodeSet, sdk_pool_handle, sdk_wallet_trustee + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_trustee ): set_txn_author_agreement() - sdk_send_txn_author_agreement_disable(looper, sdk_pool_handle, sdk_wallet_trustee) + sdk_send_txn_author_agreement_disable(looper, vdr_pool_handle, vdr_wallet_trustee) with pytest.raises( RequestRejectedException, match='Transaction author agreement is already disabled' ): - sdk_send_txn_author_agreement_disable(looper, sdk_pool_handle, sdk_wallet_trustee) + sdk_send_txn_author_agreement_disable(looper, vdr_pool_handle, vdr_wallet_trustee) assert get_txn_author_agreement() is None @pytest.mark.parametrize('retired_offset', [300, -300, -900, None]) def test_cannot_change_retirement_ts_of_latest_taa_after_disable_all(looper, set_txn_author_agreement_aml, - sdk_pool_handle, sdk_wallet_trustee, + vdr_pool_handle, vdr_wallet_trustee, retired_offset): # Write random TAA version, text, ratified = randomString(16), randomString(1024), get_utc_epoch() - 600 - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version, text=text, ratified=ratified) # Disable all TAAs - sdk_send_txn_author_agreement_disable(looper, sdk_pool_handle, sdk_wallet_trustee) + sdk_send_txn_author_agreement_disable(looper, vdr_pool_handle, vdr_wallet_trustee) # Make sure we cannot change its retirement date with pytest.raises(RequestRejectedException): retired = get_utc_epoch() + retired_offset if retired_offset is not None else None - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version, retired=retired) @pytest.mark.parametrize('retired_offset', [300, -300, -900, None]) def test_cannot_change_retirement_ts_of_non_latest_taa_after_disable_all(looper, set_txn_author_agreement_aml, - sdk_pool_handle, sdk_wallet_trustee, + vdr_pool_handle, vdr_wallet_trustee, retired_offset): version_1, text_1, ratified_1 = randomString(16), randomString(1024), get_utc_epoch() - 600 - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version_1, text=text_1, ratified=ratified_1) version_2, text_2, ratified_2 = randomString(16), randomString(1024), get_utc_epoch() - 600 - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version_2, text=text_2, ratified=ratified_2) # Disable all TAAs - sdk_send_txn_author_agreement_disable(looper, sdk_pool_handle, sdk_wallet_trustee) + sdk_send_txn_author_agreement_disable(looper, vdr_pool_handle, vdr_wallet_trustee) # Make sure we cannot change its retirement date with pytest.raises(RequestRejectedException): retired = get_utc_epoch() + retired_offset if retired_offset is not None else None - sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee, + sdk_send_txn_author_agreement(looper, vdr_pool_handle, vdr_wallet_trustee, version=version_1, retired=retired) diff --git a/plenum/test/utils.py b/plenum/test/utils.py new file mode 100644 index 0000000000..c66ee0d136 --- /dev/null +++ b/plenum/test/utils.py @@ -0,0 +1,20 @@ + + +async def get_did_signing_key(wallet_handle, did): + item = await wallet_handle.fetch("did", did, for_update=False) + if item: + kp = await 
wallet_handle.fetch_key(item.value_json.get("verkey")) + return kp.key + return None + +async def sign_request(wallet_handle, submitter_did, req): + key = await get_did_signing_key(wallet_handle, submitter_did) + if not key: + raise Exception(f"Key for DID {submitter_did} is empty") + req.set_signature(key.sign_message(req.signature_input)) + return req + +async def sign_and_submit_request(pool_handle, wallet_handle, submitter_did, req): + sreq = await sign_request(wallet_handle, submitter_did, req) + request_result = await pool_handle.submit_request(sreq) + return request_result \ No newline at end of file diff --git a/plenum/test/validator_info/conftest.py b/plenum/test/validator_info/conftest.py index 297fcb1529..5011a89c7c 100644 --- a/plenum/test/validator_info/conftest.py +++ b/plenum/test/validator_info/conftest.py @@ -5,7 +5,7 @@ from plenum.common.request import Request from plenum.common.types import f from plenum.common.util import getTimeBasedId -from plenum.test.helper import sdk_sign_and_submit_req_obj, sdk_get_and_check_replies +from plenum.test.helper import vdr_sign_and_submit_req_obj, vdr_get_and_check_replies, vdr_gen_request TEST_NODE_NAME = 'Alpha' INFO_FILENAME = '{}_info.json'.format(TEST_NODE_NAME.lower()) @@ -27,20 +27,18 @@ def node(txnPoolNodeSet): @pytest.fixture def read_txn_and_get_latest_info(looper, - sdk_pool_handle, - sdk_wallet_client, node): - _, did = sdk_wallet_client + vdr_pool_handle, + vdr_wallet_client, node): + _, did = vdr_wallet_client def read_wrapped(txn_type): op = { TXN_TYPE: txn_type, f.LEDGER_ID.nm: DOMAIN_LEDGER_ID, DATA: 1 } - req = Request(identifier=did, - operation=op, reqId=getTimeBasedId(), - protocolVersion=CURRENT_PROTOCOL_VERSION) - sdk_get_and_check_replies(looper, [sdk_sign_and_submit_req_obj( - looper, sdk_pool_handle, sdk_wallet_client, req)]) + req = vdr_gen_request(op, CURRENT_PROTOCOL_VERSION, did, reqId=getTimeBasedId()) + vdr_get_and_check_replies(looper, [vdr_sign_and_submit_req_obj( + looper, vdr_pool_handle, vdr_wallet_client, req)]) return node._info_tool.info diff --git a/plenum/test/validator_info/test_validator_info.py b/plenum/test/validator_info/test_validator_info.py index 4b908d4e80..67052f56a0 100644 --- a/plenum/test/validator_info/test_validator_info.py +++ b/plenum/test/validator_info/test_validator_info.py @@ -7,7 +7,7 @@ from crypto.bls.indy_crypto.bls_crypto_indy_crypto import IndyCryptoBlsUtils from plenum.common.constants import GET_TXN from plenum.server.validator_info_tool import ValidatorNodeInfoTool -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected from stp_core.common.constants import ZMQ_NETWORK_PROTOCOL @@ -244,13 +244,13 @@ def test_protocol_info_section(info): @pytest.fixture def write_txn_and_get_latest_info(txnPoolNodesLooper, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, node): def write_wrapped(): - sdk_send_random_and_check(txnPoolNodesLooper, range(nodeCount), - sdk_pool_handle, - sdk_wallet_client, + vdr_send_random_and_check(txnPoolNodesLooper, range(nodeCount), + vdr_pool_handle, + vdr_wallet_client, 1) return node._info_tool.info diff --git a/plenum/test/validator_info/test_validator_info_vc.py b/plenum/test/validator_info/test_validator_info_vc.py index 3c14f08936..12a1830690 100644 --- a/plenum/test/validator_info/test_validator_info_vc.py +++ 
b/plenum/test/validator_info/test_validator_info_vc.py @@ -1,9 +1,9 @@ import pytest from plenum.server.suspicion_codes import Suspicions -from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check +from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality -from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected, sdk_pool_refresh +from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected, vdr_pool_refresh from plenum.test.test_node import get_master_primary_node, checkNodesConnected from plenum.test.view_change.helper import start_stopped_node from plenum.test.view_change_service.helper import send_test_instance_change, trigger_view_change @@ -21,8 +21,8 @@ def tconf(tconf): def test_number_txns_in_catchup_and_vc_queue_valid(looper, txnPoolNodeSet, tconf, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tdir, allPluginsPath): num_txns = 5 @@ -36,8 +36,8 @@ def test_number_txns_in_catchup_and_vc_queue_valid(looper, looper.removeProdable(master_node) looper.run(eventually(checkViewNoForNodes, other_nodes, expected_view_no, retryWait=1, timeout=tconf.NEW_VIEW_TIMEOUT)) - sdk_pool_refresh(looper, sdk_pool_handle) - sdk_send_random_and_check(looper, other_nodes, sdk_pool_handle, sdk_wallet_steward, num_txns) + vdr_pool_refresh(looper, vdr_pool_handle) + vdr_send_random_and_check(looper, other_nodes, vdr_pool_handle, vdr_wallet_steward, num_txns) master_node = start_stopped_node(master_node, looper, tconf, tdir, allPluginsPath) txnPoolNodeSet[master_node_index] = master_node @@ -54,8 +54,8 @@ def test_number_txns_in_catchup_and_vc_queue_valid(looper, def test_instance_change_before_vc(looper, txnPoolNodeSet, tconf, - sdk_pool_handle, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_steward): master_node = get_master_primary_node(txnPoolNodeSet) old_view = master_node.viewNo expected_view_no = old_view + 1 diff --git a/plenum/test/view_change/helper.py b/plenum/test/view_change/helper.py index 6516c82483..eb5ad6f014 100644 --- a/plenum/test/view_change/helper.py +++ b/plenum/test/view_change/helper.py @@ -8,9 +8,9 @@ from plenum.test.delayers import delayNonPrimaries, delay_3pc_messages, \ reset_delays_and_process_delayeds from plenum.test.helper import checkViewNoForNodes, \ - sdk_send_random_requests, sdk_send_random_and_check + vdr_send_random_requests, vdr_send_random_and_check from plenum.test.pool_transactions.helper import \ - disconnect_node_and_ensure_disconnected, sdk_add_new_steward_and_node, sdk_pool_refresh + disconnect_node_and_ensure_disconnected, vdr_add_new_steward_and_node, vdr_pool_refresh from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data, waitNodeDataEquality from plenum.test.test_node import get_master_primary_node, ensureElectionsDone, \ TestNode, checkNodesConnected, check_not_in_view_change @@ -53,7 +53,7 @@ def provoke_and_check_view_change(looper, nodes, newViewNo, sdk_pool_handle, sdk else: logger.info('Master instance has not degraded yet, ' 'sending more requests') - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client) + vdr_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client) assert False @@ -81,7 +81,7 @@ def simulate_slow_master(looper, txnPoolNodeSet, sdk_pool_handle, # Delay processing of PRE-PREPARE from all non primary replicas of master # so master's performance falls and view changes 
delayNonPrimaries(txnPoolNodeSet, 0, delay) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, num_reqs) return m_primary_node @@ -295,10 +295,10 @@ def view_change_in_between_3pc(looper, nodes, slow_nodes, sdk_pool_handle, sdk_wallet_client, slow_delay=1, wait=None): - sdk_send_random_and_check(looper, nodes, sdk_pool_handle, sdk_wallet_client, 4) + vdr_send_random_and_check(looper, nodes, sdk_pool_handle, sdk_wallet_client, 4) delay_3pc_messages(slow_nodes, 0, delay=slow_delay) - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 10) + vdr_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 10) if wait: looper.runFor(wait) @@ -310,9 +310,9 @@ def view_change_in_between_3pc(looper, nodes, slow_nodes, ensureElectionsDone(looper=looper, nodes=nodes) ensure_all_nodes_have_same_data(looper, nodes) - sdk_send_random_and_check(looper, nodes, sdk_pool_handle, + vdr_send_random_and_check(looper, nodes, sdk_pool_handle, sdk_wallet_client, 5, total_timeout=30) - sdk_send_random_and_check(looper, nodes, sdk_pool_handle, + vdr_send_random_and_check(looper, nodes, sdk_pool_handle, sdk_wallet_client, 5, total_timeout=30) @@ -325,13 +325,13 @@ def view_change_in_between_3pc_random_delays( tconf, min_delay=0, max_delay=0): - sdk_send_random_and_check(looper, nodes, sdk_pool_handle, sdk_wallet_client, 4) + vdr_send_random_and_check(looper, nodes, sdk_pool_handle, sdk_wallet_client, 4) # max delay should not be more than catchup timeout. max_delay = max_delay or tconf.NEW_VIEW_TIMEOUT - 1 delay_3pc_messages(slow_nodes, 0, min_delay=min_delay, max_delay=max_delay) - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 10) + vdr_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 10) ensure_view_change_complete(looper, nodes, @@ -340,14 +340,14 @@ def view_change_in_between_3pc_random_delays( reset_delays_and_process_delayeds(slow_nodes) - sdk_send_random_and_check(looper, nodes, sdk_pool_handle, sdk_wallet_client, 10) + vdr_send_random_and_check(looper, nodes, sdk_pool_handle, sdk_wallet_client, 10) def add_new_node(looper, nodes, sdk_pool_handle, sdk_wallet_steward, tdir, tconf, all_plugins_path, name=None, wait_till_added=True): node_name = name or "Psi" new_steward_name = "testClientSteward" + randomString(3) - _, new_node = sdk_add_new_steward_and_node( + _, new_node = vdr_add_new_steward_and_node( looper, sdk_pool_handle, sdk_wallet_steward, new_steward_name, node_name, tdir, tconf, allPluginsPath=all_plugins_path, wait_till_added=wait_till_added) @@ -358,7 +358,7 @@ def add_new_node(looper, nodes, sdk_pool_handle, sdk_wallet_steward, waitNodeDataEquality(looper, new_node, *nodes[:-1], customTimeout=timeout, exclude_from_check=['check_last_ordered_3pc_backup']) - sdk_pool_refresh(looper, sdk_pool_handle) + vdr_pool_refresh(looper, sdk_pool_handle) return new_node diff --git a/plenum/test/view_change/test_3pc_msgs_during_view_change.py b/plenum/test/view_change/test_3pc_msgs_during_view_change.py index 1507297368..ddf9157f20 100644 --- a/plenum/test/view_change/test_3pc_msgs_during_view_change.py +++ b/plenum/test/view_change/test_3pc_msgs_during_view_change.py @@ -3,7 +3,7 @@ from plenum.common.exceptions import RequestRejectedException from plenum.test.delayers import ppgDelay -from plenum.test.helper import sdk_send_random_and_check, sdk_send_random_requests, sdk_get_replies, send_pre_prepare, \ +from plenum.test.helper import 
vdr_send_random_and_check, vdr_send_random_requests, vdr_get_replies, send_pre_prepare, \ send_prepare, send_commit from plenum.test.view_change.helper import check_replica_queue_empty, \ check_all_replica_queue_empty @@ -11,13 +11,13 @@ @pytest.mark.skip('Currently we stash client requests during view change') def test_no_requests_processed_during_view_change(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): for node in txnPoolNodeSet: node.view_change_in_progress = True with pytest.raises(RequestRejectedException) as e: - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 10) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 10) assert 'Can not process requests when view change is in progress' in \ e._excinfo[1].args[0] @@ -49,8 +49,8 @@ def test_no_new_view_3pc_messages_processed_during_view_change( @pytest.mark.skip('The filter is not enabled now') def test_old_view_requests_processed_during_view_change(looper, txnPoolNodeSet, - sdk_wallet_handle, - sdk_wallet_client): + vdr_wallet_handle, + vdr_wallet_client): """ Make sure that requests sent before view change started are processed and replies are returned: - delay Propogates (to make sure that requests are not ordered before view change is started) @@ -61,11 +61,11 @@ def test_old_view_requests_processed_during_view_change(looper, txnPoolNodeSet, node.view_change_in_progress = False node.nodeIbStasher.delay(ppgDelay(3, 0)) - requests = sdk_send_random_requests(looper, sdk_wallet_handle, - sdk_wallet_client, 2) + requests = vdr_send_random_requests(looper, vdr_wallet_handle, + vdr_wallet_client, 2) looper.runFor(1) for node in txnPoolNodeSet: node.view_change_in_progress = True - sdk_get_replies(looper, requests) + vdr_get_replies(looper, requests) diff --git a/plenum/test/view_change/test_6th_node_join_after_view_change_by_primary_restart.py b/plenum/test/view_change/test_6th_node_join_after_view_change_by_primary_restart.py index c32f777612..33b048f810 100644 --- a/plenum/test/view_change/test_6th_node_join_after_view_change_by_primary_restart.py +++ b/plenum/test/view_change/test_6th_node_join_after_view_change_by_primary_restart.py @@ -3,7 +3,7 @@ from plenum.test.view_change.helper import ensure_all_nodes_have_same_data, \ ensure_view_change, add_new_node from plenum.common.constants import DOMAIN_LEDGER_ID, LedgerState, POOL_LEDGER_ID -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from stp_core.common.log import getlogger from stp_core.loop.eventually import eventually @@ -11,7 +11,7 @@ waitNodeDataEquality from plenum.common.util import randomString from plenum.test.test_node import checkNodesConnected, ensureElectionsDone -from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node, sdk_pool_refresh +from plenum.test.pool_transactions.helper import vdr_add_new_steward_and_node, vdr_pool_refresh from plenum.test import waits from plenum.common.startable import Mode @@ -42,8 +42,8 @@ def catchuped(node): def test_6th_node_join_after_view_change_by_master_restart( looper, txnPoolNodeSet, tdir, tconf, - allPluginsPath, sdk_pool_handle, - sdk_wallet_steward, + allPluginsPath, vdr_pool_handle, + vdr_wallet_steward, limitTestRunningTime): """ Test steps: @@ -63,19 +63,19 @@ def test_6th_node_join_after_view_change_by_master_restart( for node in txnPoolNodeSet: looper.run(eventually(catchuped, node, timeout=2 * timeout)) 
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=timeout) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 5) new_epsilon_node = add_new_node(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath, name='Epsilon') - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 5) """ check that pool and domain ledgers for new node are in synced state """ @@ -92,12 +92,12 @@ def test_6th_node_join_after_view_change_by_master_restart( timeout = waits.expectedPoolCatchupTime(nodeCount=len(txnPoolNodeSet)) for node in txnPoolNodeSet: looper.run(eventually(catchuped, node, timeout=3 * timeout)) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 2) new_psi_node = add_new_node(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath, diff --git a/plenum/test/view_change/test_add_node_delay_commit_on_one.py b/plenum/test/view_change/test_add_node_delay_commit_on_one.py index a6d982c4e6..10e8360fef 100644 --- a/plenum/test/view_change/test_add_node_delay_commit_on_one.py +++ b/plenum/test/view_change/test_add_node_delay_commit_on_one.py @@ -1,17 +1,17 @@ from plenum.test.delayers import cDelay from plenum.test.helper import waitForViewChange from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node +from plenum.test.pool_transactions.helper import vdr_add_new_steward_and_node from plenum.test.stasher import delay_rules from plenum.test.test_node import checkNodesConnected, ensureElectionsDone -def test_add_node_delay_commit_on_one(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, tdir, tconf, allPluginsPath): +def test_add_node_delay_commit_on_one(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath): view_no = txnPoolNodeSet[-1].viewNo # Add a New node but don't allow Delta to be aware of it. We do not want it in Delta's node registry. 
with delay_rules(txnPoolNodeSet[-1].nodeIbStasher, cDelay()): - _, new_node = sdk_add_new_steward_and_node(looper, sdk_pool_handle, sdk_wallet_steward, + _, new_node = vdr_add_new_steward_and_node(looper, vdr_pool_handle, vdr_wallet_steward, 'New_Steward', 'Epsilon', tdir, tconf, allPluginsPath=allPluginsPath) txnPoolNodeSet.append(new_node) diff --git a/plenum/test/view_change/test_backup_stabilized_checkpoint_on_view_change.py b/plenum/test/view_change/test_backup_stabilized_checkpoint_on_view_change.py index 01ab2c9818..798b302f70 100644 --- a/plenum/test/view_change/test_backup_stabilized_checkpoint_on_view_change.py +++ b/plenum/test/view_change/test_backup_stabilized_checkpoint_on_view_change.py @@ -1,7 +1,7 @@ import pytest from plenum.test.delayers import ppDelay -from plenum.test.helper import sdk_send_random_requests, assertExp +from plenum.test.helper import vdr_send_random_requests, assertExp from plenum.test.stasher import delay_rules from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change.helper import ensure_view_change @@ -21,13 +21,13 @@ def tconf(tconf): def test_backup_stabilized_checkpoint_on_view_change(looper, txnPoolNodeSet, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_client, + vdr_pool_handle): # Delta:1 backup = txnPoolNodeSet[-1].replicas[1] count_of_replicas = len(txnPoolNodeSet[0].replicas) with delay_rules([n.nodeIbStasher for n in txnPoolNodeSet], ppDelay(instId=0)): - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, REQ_COUNT) + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, REQ_COUNT) looper.run(eventually(lambda r: assertExp(r.last_ordered_3pc == (0, REQ_COUNT)), backup)) # assert that all of requests are propagated for n in txnPoolNodeSet: diff --git a/plenum/test/view_change/test_catchup_to_next_view_during_view_change.py b/plenum/test/view_change/test_catchup_to_next_view_during_view_change.py index 16e4afa13a..e0ccf09261 100644 --- a/plenum/test/view_change/test_catchup_to_next_view_during_view_change.py +++ b/plenum/test/view_change/test_catchup_to_next_view_during_view_change.py @@ -1,9 +1,9 @@ import pytest from plenum.test.delayers import icDelay, delay_for_view, vc_delay -from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check, waitForViewChange +from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check, waitForViewChange from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.stasher import delay_rules from plenum.test.test_node import checkProtocolInstanceSetup, ensureElectionsDone from plenum.test.view_change_service.helper import trigger_view_change @@ -12,7 +12,7 @@ def test_catchup_to_next_view_during_view_change_0_to_1_then_1_to_2(txnPoolNodeSet, looper, - sdk_pool_handle, sdk_wallet_steward): + vdr_pool_handle, vdr_wallet_steward): ''' 1) Lagging node is not a primary for new views 2) All nodes except the lagging one go to view=1 @@ -39,8 +39,8 @@ def test_catchup_to_next_view_during_view_change_0_to_1_then_1_to_2(txnPoolNodeS ensure_all_nodes_have_same_data(looper, nodes=other_nodes) # order some txns - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 5) # view change to viewNo=2 
trigger_view_change(txnPoolNodeSet)
@@ -51,8 +51,8 @@ def test_catchup_to_next_view_during_view_change_0_to_1_then_1_to_2(txnPoolNodeS
ensure_all_nodes_have_same_data(looper, nodes=other_nodes)
# order some txns
- sdk_send_random_and_check(looper, txnPoolNodeSet,
- sdk_pool_handle, sdk_wallet_steward, 5)
+ vdr_send_random_and_check(looper, txnPoolNodeSet,
+ vdr_pool_handle, vdr_wallet_steward, 5)
assert initial_view_no == lagging_node.viewNo
assert initial_last_ordered == lagging_node.master_last_ordered_3PC
@@ -69,12 +69,12 @@ def test_catchup_to_next_view_during_view_change_0_to_1_then_1_to_2(txnPoolNodeS
ensure_all_nodes_have_same_data(looper, nodes=other_nodes)
# make sure that the pool is functional
- sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle)
+ vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle)
@pytest.mark.skip("INDY-2044")
def test_catchup_to_next_view_during_view_change_0_to_2(txnPoolNodeSet, looper,
- sdk_pool_handle, sdk_wallet_steward):
+ vdr_pool_handle, vdr_wallet_steward):
'''
1) Lagging node is not a primary for new views
2) All nodes except the lagging one go to view=1
@@ -100,8 +100,8 @@ def test_catchup_to_next_view_during_view_change_0_to_2(txnPoolNodeSet, looper,
ensure_all_nodes_have_same_data(looper, nodes=other_nodes)
# order some txns
- sdk_send_random_and_check(looper, txnPoolNodeSet,
- sdk_pool_handle, sdk_wallet_steward, 5)
+ vdr_send_random_and_check(looper, txnPoolNodeSet,
+ vdr_pool_handle, vdr_wallet_steward, 5)
# view change to viewNo=2
trigger_view_change(txnPoolNodeSet)
@@ -112,8 +112,8 @@ def test_catchup_to_next_view_during_view_change_0_to_2(txnPoolNodeSet, looper,
ensure_all_nodes_have_same_data(looper, nodes=other_nodes)
# order some txns
- sdk_send_random_and_check(looper, txnPoolNodeSet,
- sdk_pool_handle, sdk_wallet_steward, 5)
+ vdr_send_random_and_check(looper, txnPoolNodeSet,
+ vdr_pool_handle, vdr_wallet_steward, 5)
assert initial_view_no == lagging_node.viewNo
assert initial_last_ordered == lagging_node.master_last_ordered_3PC
@@ -125,4 +125,4 @@ def test_catchup_to_next_view_during_view_change_0_to_2(txnPoolNodeSet, looper,
ensure_all_nodes_have_same_data(looper, nodes=other_nodes)
# make sure that the pool is functional
- sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle)
+ vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle)
diff --git a/plenum/test/view_change/test_catchup_to_next_view_during_view_change_by_primary.py b/plenum/test/view_change/test_catchup_to_next_view_during_view_change_by_primary.py
index 2e2e035d02..e6c754eeeb 100644
--- a/plenum/test/view_change/test_catchup_to_next_view_during_view_change_by_primary.py
+++ b/plenum/test/view_change/test_catchup_to_next_view_during_view_change_by_primary.py
@@ -2,9 +2,9 @@
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.test.delayers import delay_for_view
-from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check, waitForViewChange, view_change_timeout
+from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check, waitForViewChange, view_change_timeout
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
-from plenum.test.node_request.helper import sdk_ensure_pool_functional
+from plenum.test.node_request.helper import vdr_ensure_pool_functional
from plenum.test.stasher import delay_rules
from plenum.test.test_node import checkProtocolInstanceSetup, ensureElectionsDone
from plenum.test.view_change_service.helper import trigger_view_change
@@ -20,7 +20,7 @@ def tconf(tconf):
def test_catchup_to_next_view_during_view_change_by_primary(txnPoolNodeSet, looper,
- sdk_pool_handle, sdk_wallet_steward):
+ vdr_pool_handle, vdr_wallet_steward):
'''
1) Lagging node is a primary for view=1
2) All nodes except the lagging one start a view change (to view=1)
@@ -48,8 +48,8 @@ def test_catchup_to_next_view_during_view_change_by_primary(txnPoolNodeSet, loop
ensure_all_nodes_have_same_data(looper, nodes=other_nodes)
# order some txns
- sdk_send_random_and_check(looper, txnPoolNodeSet,
- sdk_pool_handle, sdk_wallet_steward, 5)
+ vdr_send_random_and_check(looper, txnPoolNodeSet,
+ vdr_pool_handle, vdr_wallet_steward, 5)
assert initial_view_no == lagging_node.viewNo
assert initial_last_ordered == lagging_node.master_last_ordered_3PC
@@ -67,4 +67,4 @@ def test_catchup_to_next_view_during_view_change_by_primary(txnPoolNodeSet, loop
ensure_all_nodes_have_same_data(looper, nodes=other_nodes)
# make sure that the pool is functional
- sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle)
+ vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle)
diff --git a/plenum/test/view_change/test_client_req_during_view_change.py b/plenum/test/view_change/test_client_req_during_view_change.py
index 228f9c36bc..01d7db15b2 100644
--- a/plenum/test/view_change/test_client_req_during_view_change.py
+++ b/plenum/test/view_change/test_client_req_during_view_change.py
@@ -1,7 +1,7 @@
import pytest
from plenum.common.constants import NODE, TXN_TYPE, GET_TXN
-from plenum.test.helper import sdk_gen_request, checkDiscardMsg
+from plenum.test.helper import vdr_gen_request, checkDiscardMsg
from plenum.test.test_config_req_handler import READ_CONF, ConfigTestBootstrapClass
@@ -18,14 +18,14 @@ def test_node(test_node):
def test_client_write_request_discard_in_view_change_with_dict(test_node):
test_node.send_nack_to_client = check_nack_msg
- msg = sdk_gen_request({TXN_TYPE: NODE}).as_dict
+ msg = vdr_gen_request({TXN_TYPE: NODE}).as_dict
test_node.unpackClientMsg(msg, "frm")
checkDiscardMsg([test_node, ], msg, "view change in progress")
def test_client_get_request_not_discard_in_view_change_with_dict(test_node):
sender = "frm"
- msg = sdk_gen_request({TXN_TYPE: GET_TXN}).as_dict
+ msg = vdr_gen_request({TXN_TYPE: GET_TXN}).as_dict
def post_to_client_in_box(received_msg, received_frm):
assert received_frm == sender
@@ -43,7 +43,7 @@ def discard(received_msg, reason, logMethod, cliOutput):
def test_client_read_request_not_discard_in_view_change_with_dict(test_node):
sender = "frm"
- msg = sdk_gen_request({TXN_TYPE: READ_CONF}).as_dict
+ msg = vdr_gen_request({TXN_TYPE: READ_CONF}).as_dict
def post_to_client_in_box(received_msg, received_frm):
assert received_frm == sender
@@ -62,7 +62,7 @@ def discard(received_msg, reason, logMethod, cliOutput):
def test_client_msg_discard_in_view_change_with_request(test_node):
test_node.send_nack_to_client = check_nack_msg
- msg = sdk_gen_request({TXN_TYPE: NODE})
+ msg = vdr_gen_request({TXN_TYPE: NODE})
test_node.unpackClientMsg(msg, "frm")
checkDiscardMsg([test_node, ], msg.as_dict, "view change in progress")
diff --git a/plenum/test/view_change/test_client_req_during_view_change_integration.py b/plenum/test/view_change/test_client_req_during_view_change_integration.py
index 69d62b74b7..f51d7b08c5 100644
--- a/plenum/test/view_change/test_client_req_during_view_change_integration.py
+++ b/plenum/test/view_change/test_client_req_during_view_change_integration.py
@@ -1,43 +1,43 @@
import pytest
from plenum.common.exceptions import PoolLedgerTimeoutException
-from plenum.test.helper import sdk_send_random_and_check, \
- sdk_send_random_requests, sdk_get_and_check_replies
+from plenum.test.helper import vdr_send_random_and_check, \
+ vdr_send_random_requests, vdr_get_and_check_replies
-from plenum.test.pool_transactions.helper import sdk_build_get_txn_request, sdk_sign_and_send_prepared_request
+from plenum.test.pool_transactions.helper import vdr_build_get_txn_request, vdr_sign_and_send_prepared_request
def test_client_write_request_discard_in_view_change_integration(txnPoolNodeSet, looper,
- sdk_pool_handle,
- sdk_wallet_client):
+ vdr_pool_handle,
+ vdr_wallet_client):
'''
Check that client requests sent in view change will discard.
'''
- sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
- sdk_wallet_client, 4)
+ vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle,
+ vdr_wallet_client, 4)
for node in txnPoolNodeSet:
node.master_replica._consensus_data.waiting_for_new_view = True
- discard_reqs = sdk_send_random_requests(looper, sdk_pool_handle,
- sdk_wallet_client, 1)
+ discard_reqs = vdr_send_random_requests(looper, vdr_pool_handle,
+ vdr_wallet_client, 1)
with pytest.raises(PoolLedgerTimeoutException) as e:
- sdk_get_and_check_replies(looper, discard_reqs)
+ vdr_get_and_check_replies(looper, discard_reqs)
def test_client_get_request_not_discard_in_view_change_integration(txnPoolNodeSet, looper,
- sdk_pool_handle,
- sdk_wallet_client):
+ vdr_pool_handle,
+ vdr_wallet_client):
'''
Check that client requests sent in view change will discard.
'''
for node in txnPoolNodeSet:
node.master_replica._consensus_data.waiting_for_new_view = True
- _, steward_did = sdk_wallet_client
- request = sdk_build_get_txn_request(looper, steward_did, 1)
+ _, steward_did = vdr_wallet_client
+ request = vdr_build_get_txn_request(looper, steward_did, 1)
- sdk_request = sdk_sign_and_send_prepared_request(looper,
- sdk_wallet_client,
- sdk_pool_handle,
+ sdk_request = vdr_sign_and_send_prepared_request(looper,
+ vdr_wallet_client,
+ vdr_pool_handle,
request)
- sdk_get_and_check_replies(looper, [sdk_request])
+ vdr_get_and_check_replies(looper, [sdk_request])
diff --git a/plenum/test/view_change/test_complete_with_delayed_view_change.py b/plenum/test/view_change/test_complete_with_delayed_view_change.py
index 7137e88b75..9a8ce0f176 100644
--- a/plenum/test/view_change/test_complete_with_delayed_view_change.py
+++ b/plenum/test/view_change/test_complete_with_delayed_view_change.py
@@ -5,7 +5,7 @@
from plenum.common.constants import PreVCStrategies
from plenum.common.messages.node_messages import ViewChangeDone, InstanceChange, NewView, ViewChange
-from plenum.test.helper import sdk_send_random_and_check
+from plenum.test.helper import vdr_send_random_and_check
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.test_node import TestNode, ensureElectionsDone
from plenum.test.view_change.helper import ensure_view_change
@@ -48,13 +48,13 @@ async def processNodeInBoxWithoutVCDone(self):
def test_complete_with_delayed_view_change(looper, txnPoolNodeSet,
- sdk_wallet_steward,
- sdk_pool_handle):
+ vdr_wallet_steward,
+ vdr_pool_handle):
def chk_len_stashed_msgs():
# We are waiting for one message from selected primary
assert len(stashed_vc_done_msgs) == 1
- sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, REQ_COUNT)
+ vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, REQ_COUNT)
slow_node = txnPoolNodeSet[-1]
not_processing_view_change_done(slow_node)
ensure_view_change(looper, txnPoolNodeSet[:-1])
diff --git a/plenum/test/view_change/test_demote_node_delay_commit_on_one.py b/plenum/test/view_change/test_demote_node_delay_commit_on_one.py
index 3337687ed1..89cfe6c987 100644
--- a/plenum/test/view_change/test_demote_node_delay_commit_on_one.py
+++ b/plenum/test/view_change/test_demote_node_delay_commit_on_one.py
@@ -8,14 +8,14 @@
nodeCount = 8
-def test_demote_node_delay_commit_on_one(looper, txnPoolNodeSet, sdk_pool_handle,
- sdk_wallet_stewards, tdir, tconf, allPluginsPath):
+def test_demote_node_delay_commit_on_one(looper, txnPoolNodeSet, vdr_pool_handle,
+ vdr_wallet_stewards, tdir, tconf, allPluginsPath):
view_no = txnPoolNodeSet[-1].viewNo
slow_node = txnPoolNodeSet[-2]
# Demote Node8 but don't allow Node7 to be aware of it.
with delay_rules(slow_node.nodeIbStasher, cDelay()):
- demote_node(looper, sdk_wallet_stewards[-1], sdk_pool_handle,
+ demote_node(looper, vdr_wallet_stewards[-1], vdr_pool_handle,
txnPoolNodeSet[-1])
del txnPoolNodeSet[-1]
diff --git a/plenum/test/view_change/test_disable_view_change.py b/plenum/test/view_change/test_disable_view_change.py
index b4877ef3f2..2e4cad595c 100644
--- a/plenum/test/view_change/test_disable_view_change.py
+++ b/plenum/test/view_change/test_disable_view_change.py
@@ -14,15 +14,15 @@ def test_disable_view_change(
looper,
txnPoolNodeSet,
viewNo,
- sdk_pool_handle,
- sdk_wallet_steward):
+ vdr_pool_handle,
+ vdr_wallet_steward):
assert disable_view_change_config
assert isinstance(disable_view_change_config.unsafe, set)
assert 'disable_view_change' in disable_view_change_config.unsafe
simulate_slow_master(looper, txnPoolNodeSet,
- sdk_pool_handle,
- sdk_wallet_steward)
+ vdr_pool_handle,
+ vdr_wallet_steward)
with pytest.raises(AssertionError):
waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=viewNo + 1)
diff --git a/plenum/test/view_change/test_disconnected_node_reconnects_after_view_change.py b/plenum/test/view_change/test_disconnected_node_reconnects_after_view_change.py
index 6ee002a89d..380aca1002 100644
--- a/plenum/test/view_change/test_disconnected_node_reconnects_after_view_change.py
+++ b/plenum/test/view_change/test_disconnected_node_reconnects_after_view_change.py
@@ -2,7 +2,7 @@
from plenum.test import waits
from plenum.test.helper import checkViewNoForNodes, waitForViewChange, \
- sdk_send_random_and_check, sdk_send_batches_of_random_and_check
+ vdr_send_random_and_check, vdr_send_batches_of_random_and_check
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.pool_transactions.helper import \
disconnect_node_and_ensure_disconnected, \
@@ -30,7 +30,7 @@ def tconf(tconf):
def test_disconnected_node_with_lagged_view_pulls_up_its_view_on_reconnection(
- looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, tconf):
+ looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle, tconf):
"""
Verifies that a disconnected node with a lagged view accepts the current view from the other nodes on re-connection.
@@ -50,16 +50,16 @@ def test_disconnected_node_with_lagged_view_pulls_up_its_view_on_reconnection( """ checkViewNoForNodes(txnPoolNodeSet, 0) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) ensure_view_change(looper, txnPoolNodeSet) ensureElectionsDone(looper, txnPoolNodeSet) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) checkViewNoForNodes(txnPoolNodeSet, 1) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) lagged_node = getNonPrimaryReplicas(txnPoolNodeSet)[-1].node disconnect_node_and_ensure_disconnected(looper, @@ -68,8 +68,8 @@ def test_disconnected_node_with_lagged_view_pulls_up_its_view_on_reconnection( stopNode=False) other_nodes = list(set(txnPoolNodeSet) - {lagged_node}) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) ensure_view_change(looper, other_nodes) ensureElectionsDone(looper, other_nodes, @@ -78,8 +78,8 @@ def test_disconnected_node_with_lagged_view_pulls_up_its_view_on_reconnection( checkViewNoForNodes(other_nodes, 2) checkViewNoForNodes([lagged_node], 1) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) ensure_view_change(looper, other_nodes) ensureElectionsDone(looper, other_nodes, @@ -88,8 +88,8 @@ def test_disconnected_node_with_lagged_view_pulls_up_its_view_on_reconnection( checkViewNoForNodes(other_nodes, 3) checkViewNoForNodes([lagged_node], 1) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) reconnect_node_and_ensure_connected(looper, txnPoolNodeSet, lagged_node) @@ -100,11 +100,11 @@ def test_disconnected_node_with_lagged_view_pulls_up_its_view_on_reconnection( customTimeout=waits.expectedPoolElectionTimeout( len(txnPoolNodeSet))) - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, num_reqs=2 * tconf.CHK_FREQ) + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, num_reqs=2 * tconf.CHK_FREQ) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) checkViewNoForNodes(txnPoolNodeSet, 3) ensureElectionsDone(looper, txnPoolNodeSet) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/view_change/test_master_primary_different_from_previous.py b/plenum/test/view_change/test_master_primary_different_from_previous.py index 706c2fad9e..94a01831ca 100644 --- a/plenum/test/view_change/test_master_primary_different_from_previous.py +++ b/plenum/test/view_change/test_master_primary_different_from_previous.py @@ -3,7 +3,7 @@ import pytest from plenum.test.helper import checkViewNoForNodes, \ - sdk_send_random_and_check, countDiscarded + vdr_send_random_and_check, countDiscarded from plenum.test.malicious_behaviors_node import slow_primary from plenum.test.test_node import getPrimaryReplica, 
ensureElectionsDone from plenum.test.view_change.helper import provoke_and_wait_for_view_change, ensure_view_change @@ -14,7 +14,7 @@ def test_master_primary_different_from_previous(txnPoolNodeSet, looper, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): """ After a view change, primary must be different from previous primary for master instance, it does not matter for other instance. The primary is @@ -36,4 +36,4 @@ def test_master_primary_different_from_previous(txnPoolNodeSet, looper, pr.outBoxTestStasher.resetDelays() # The new primary can still process requests - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 5) diff --git a/plenum/test/view_change/test_missing_pp_before_starting_vc.py b/plenum/test/view_change/test_missing_pp_before_starting_vc.py index caf068d7ca..a3af35cb81 100644 --- a/plenum/test/view_change/test_missing_pp_before_starting_vc.py +++ b/plenum/test/view_change/test_missing_pp_before_starting_vc.py @@ -2,9 +2,9 @@ from plenum.common.messages.node_messages import PrePrepare from plenum.test.delayers import delay_3pc -from plenum.test.helper import sdk_send_random_requests, check_missing_pre_prepares, max_3pc_batch_limits +from plenum.test.helper import vdr_send_random_requests, check_missing_pre_prepares, max_3pc_batch_limits from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.stasher import delay_rules from plenum.test.test_node import ensureElectionsDone, check_not_in_view_change from plenum.test.view_change.helper import ensure_view_change @@ -18,7 +18,7 @@ def tconf(tconf): def test_missing_pp_before_starting_vc(tconf, txnPoolNodeSet, looper, - sdk_pool_handle, sdk_wallet_steward): + vdr_pool_handle, vdr_wallet_steward): ''' - all nodes delay PrePrepares for viewNo=1 with ppSeqNo<4 - all nodes go to view=1 @@ -36,8 +36,8 @@ def test_missing_pp_before_starting_vc(tconf, txnPoolNodeSet, looper, looper.run(eventually(check_not_in_view_change, txnPoolNodeSet)) # 3. send requests - sdk_send_random_requests(looper, sdk_pool_handle, - sdk_wallet_steward, 10) + vdr_send_random_requests(looper, vdr_pool_handle, + vdr_wallet_steward, 10) # 4. do view change for view=2 ensure_view_change(looper, txnPoolNodeSet) @@ -45,4 +45,4 @@ def test_missing_pp_before_starting_vc(tconf, txnPoolNodeSet, looper, # 5. 
ensure everything is fine ensureElectionsDone(looper, txnPoolNodeSet) ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) diff --git a/plenum/test/view_change/test_new_node_joins_after_view_change.py b/plenum/test/view_change/test_new_node_joins_after_view_change.py index cb8027131e..1af93be3c1 100644 --- a/plenum/test/view_change/test_new_node_joins_after_view_change.py +++ b/plenum/test/view_change/test_new_node_joins_after_view_change.py @@ -9,7 +9,7 @@ from plenum.test.view_change.helper import ensure_view_change, start_stopped_node from stp_core.loop.eventually import eventually -from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check, waitForViewChange +from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check, waitForViewChange from plenum.test.pool_transactions.conftest import sdk_node_theta_added_fixture from plenum.test.primary_selection.conftest import sdk_one_node_added_fixture @@ -20,14 +20,14 @@ @pytest.fixture(scope='module') def new_node_in_correct_view(looper, txnPoolNodeSet, - sdk_one_node_added, sdk_pool_handle, sdk_wallet_client): + sdk_one_node_added, vdr_pool_handle, vdr_wallet_client): for _ in range(5): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 2) new_node = sdk_one_node_added looper.run(eventually(checkViewNoForNodes, txnPoolNodeSet, retryWait=1, timeout=10)) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 2) def test_new_node_has_same_view_as_others(new_node_in_correct_view): @@ -40,8 +40,8 @@ def test_old_non_primary_restart_after_view_change(new_node_in_correct_view, looper, txnPoolNodeSet, tdir, allPluginsPath, tconf, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): """ An existing non-primary node crashes and then view change happens, the crashed node comes back up after view change @@ -55,15 +55,15 @@ def test_old_non_primary_restart_after_view_change(new_node_in_correct_view, remaining_nodes = list(set(txnPoolNodeSet) - {node_to_stop}) # Send some requests before view change - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 5) old_view_no = txnPoolNodeSet[0].viewNo ensure_view_change(looper, remaining_nodes, custom_timeout=tconf.NEW_VIEW_TIMEOUT) waitForViewChange(looper, remaining_nodes, expectedViewNo=old_view_no + 1) ensureElectionsDone(looper, remaining_nodes) # Send some requests after view change - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 5) restarted_node = start_stopped_node(node_to_stop, looper, tconf, tdir, allPluginsPath) diff --git a/plenum/test/view_change/test_new_primary_lagging_behind.py b/plenum/test/view_change/test_new_primary_lagging_behind.py index 848275f20a..ea01cfbaf3 100644 --- a/plenum/test/view_change/test_new_primary_lagging_behind.py +++ b/plenum/test/view_change/test_new_primary_lagging_behind.py @@ -1,7 +1,7 @@ import pytest from 
plenum.test.delayers import pDelay, cDelay -from plenum.test.helper import sdk_send_random_and_check, checkViewNoForNodes +from plenum.test.helper import vdr_send_random_and_check, checkViewNoForNodes from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules from plenum.test.test_node import ensureElectionsDone, check_not_in_view_change @@ -29,8 +29,8 @@ def tconf(tconf): def test_new_primary_lagging_behind(looper, txnPoolNodeSet, - sdk_wallet_client, - sdk_pool_handle, + vdr_wallet_client, + vdr_pool_handle, tconf): initial_view_no = checkViewNoForNodes(txnPoolNodeSet) next_primary_name = get_next_primary_name(txnPoolNodeSet, initial_view_no + 1) @@ -39,7 +39,7 @@ def test_new_primary_lagging_behind(looper, expected_primary_name = get_next_primary_name(txnPoolNodeSet, initial_view_no + 2) # Next primary cannot stabilize 1 checkpoint with delay_rules(next_primary.nodeIbStasher, cDelay(), pDelay()): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, CHK_FREQ) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, CHK_FREQ) ensure_view_change(looper, txnPoolNodeSet) looper.run(eventually(check_not_in_view_change, txnPoolNodeSet, timeout=2 * tconf.NEW_VIEW_TIMEOUT)) @@ -51,5 +51,5 @@ def test_new_primary_lagging_behind(looper, assert checkViewNoForNodes(txnPoolNodeSet) == initial_view_no + 2 # send CHK_FREQ reqs so that slow node will start catch-up - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, CHK_FREQ) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, CHK_FREQ) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=30) diff --git a/plenum/test/view_change/test_no_instance_change_before_node_is_ready.py b/plenum/test/view_change/test_no_instance_change_before_node_is_ready.py index 412f0f4a80..ada8f26597 100644 --- a/plenum/test/view_change/test_no_instance_change_before_node_is_ready.py +++ b/plenum/test/view_change/test_no_instance_change_before_node_is_ready.py @@ -17,7 +17,7 @@ def tconf(tconf): def test_no_instance_change_on_primary_disconnection_for_not_ready_node( looper, txnPoolNodeSet, tdir, tconf, - allPluginsPath, sdk_pool_handle, sdk_wallet_steward): + allPluginsPath, vdr_pool_handle, vdr_wallet_steward): """ Test steps: 1. create a new node, but don't add it to the pool (so not send NODE txn), so that the node is not ready. 
@@ -48,8 +48,8 @@ def test_no_instance_change_on_primary_disconnection_for_not_ready_node( node_ha, client_ha, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, bls_key, key_proof) diff --git a/plenum/test/view_change/test_no_propagate_request_on_different_last_ordered_before_vc.py b/plenum/test/view_change/test_no_propagate_request_on_different_last_ordered_before_vc.py index dc97da0e0c..4ffbe18d0f 100644 --- a/plenum/test/view_change/test_no_propagate_request_on_different_last_ordered_before_vc.py +++ b/plenum/test/view_change/test_no_propagate_request_on_different_last_ordered_before_vc.py @@ -1,7 +1,7 @@ from plenum.server.node import Node from plenum.test.delayers import cDelay, pDelay, ppDelay -from plenum.test.helper import sdk_send_random_and_check, \ - sdk_send_random_requests, sdk_get_replies, sdk_check_reply +from plenum.test.helper import vdr_send_random_and_check, \ + vdr_send_random_requests, vdr_get_replies, vdr_check_reply from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules from plenum.test.test_node import ensureElectionsDone, getPrimaryReplica, check_not_in_view_change @@ -10,7 +10,7 @@ def test_no_propagate_request_on_different_last_ordered_on_backup_before_vc(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): ''' 1. Send random request 2. Make 3 node on backup instance slow in getting commits @@ -19,8 +19,8 @@ def test_no_propagate_request_on_different_last_ordered_on_backup_before_vc(loop 5. reset delays => we expect that all nodes and all instances have the same last ordered ''' - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) slow_instance = 1 slow_nodes = txnPoolNodeSet[1:4] fast_nodes = [n for n in txnPoolNodeSet if n not in slow_nodes] @@ -30,8 +30,8 @@ def test_no_propagate_request_on_different_last_ordered_on_backup_before_vc(loop backup_last_pp_seq_no = txnPoolNodeSet[0].replicas[slow_instance].last_ordered_3pc[1] with delay_rules(nodes_stashers, cDelay(instId=slow_instance)): # send one request - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) master_pp_seq_no += 1 looper.run( eventually(check_last_ordered, @@ -62,15 +62,15 @@ def test_no_propagate_request_on_different_last_ordered_on_backup_before_vc(loop txnPoolNodeSet[0].master_replica.instId, (last_view_no + 1, master_pp_seq_no))) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) master_pp_seq_no += 1 assert all(0 == node.spylog.count(node.request_propagates) for node in txnPoolNodeSet) def test_no_propagate_request_on_different_prepares_on_backup_before_vc(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): ''' 1. Send random request 2. Make 3 node on backup instance slow in getting prepares @@ -79,8 +79,8 @@ def test_no_propagate_request_on_different_prepares_on_backup_before_vc(looper, 5. 
reset delays => we expect that all nodes and all instances have the same last ordered ''' - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) slow_instance = 1 slow_nodes = txnPoolNodeSet[1:3] fast_nodes = [n for n in txnPoolNodeSet if n not in slow_nodes] @@ -91,8 +91,8 @@ def test_no_propagate_request_on_different_prepares_on_backup_before_vc(looper, with delay_rules(nodes_stashers, pDelay(instId=slow_instance)): with delay_rules(nodes_stashers, ppDelay(instId=slow_instance)): # send one request - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) master_pp_seq_no += 1 looper.run( eventually(is_prepared, @@ -122,8 +122,8 @@ def test_no_propagate_request_on_different_prepares_on_backup_before_vc(looper, txnPoolNodeSet[0].master_replica.instId, (last_view_no + 1, master_pp_seq_no))) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) master_pp_seq_no += 1 looper.run( eventually(check_last_ordered, @@ -135,12 +135,12 @@ def test_no_propagate_request_on_different_prepares_on_backup_before_vc(looper, def test_no_propagate_request_on_different_last_ordered_on_master_before_vc(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): ''' Send random request and do view change then fast_nodes (1, 4 - without primary after next view change) are already ordered transaction on master and slow_nodes are not. Check ordering on slow_nodes.''' - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) master_instance = txnPoolNodeSet[0].master_replica.instId slow_nodes = txnPoolNodeSet[1:3] fast_nodes = [n for n in txnPoolNodeSet if n not in slow_nodes] @@ -149,8 +149,8 @@ def test_no_propagate_request_on_different_last_ordered_on_master_before_vc(loop batches_count = old_last_ordered[1] with delay_rules(nodes_stashers, cDelay()): # send one request - requests = sdk_send_random_requests(looper, sdk_pool_handle, - sdk_wallet_client, 1) + requests = vdr_send_random_requests(looper, vdr_pool_handle, + vdr_wallet_client, 1) batches_count += 1 last_ordered_for_slow = slow_nodes[0].master_replica.last_ordered_3pc old_view_no = txnPoolNodeSet[0].viewNo @@ -168,9 +168,9 @@ def test_no_propagate_request_on_different_last_ordered_on_master_before_vc(loop batches_count += 1 - replies = sdk_get_replies(looper, requests) + replies = vdr_get_replies(looper, requests) for reply in replies: - sdk_check_reply(reply) + vdr_check_reply(reply) # a new primary will send a PrePrepare for the new view looper.run(eventually(check_last_ordered, txnPoolNodeSet, diff --git a/plenum/test/view_change/test_node_detecting_lag_from_view_change_messages.py b/plenum/test/view_change/test_node_detecting_lag_from_view_change_messages.py index 65ad653f9a..90aa3e2b1e 100644 --- a/plenum/test/view_change/test_node_detecting_lag_from_view_change_messages.py +++ b/plenum/test/view_change/test_node_detecting_lag_from_view_change_messages.py @@ -5,7 +5,7 @@ from plenum.common.util import compare_3PC_keys from plenum.test.delayers import delay_3pc_messages, icDelay, cDelay from 
plenum.test.helper import send_reqs_batches_and_get_suff_replies, \ - sdk_send_random_requests + vdr_send_random_requests from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.spy_helpers import get_count from plenum.test.test_node import getNonPrimaryReplicas @@ -16,8 +16,8 @@ @pytest.mark.skip(reason='Pending complete implementation') def test_node_detecting_lag_from_view_change_done_messages(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf): """ A node is slow and after view change starts, it marks it's `last_prepared` @@ -29,8 +29,8 @@ def test_node_detecting_lag_from_view_change_done_messages(txnPoolNodeSet, Also delay processing of COMMITs and INSTANCE_CHANGEs by other nodes """ send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, 2 * 3, 3) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) @@ -51,7 +51,7 @@ def test_node_detecting_lag_from_view_change_done_messages(txnPoolNodeSet, reqs = [] for i in range(10): # fix if unskip - reqs = reqs + sdk_send_random_requests() + reqs = reqs + vdr_send_random_requests() looper.runFor(.2) def chk1(): diff --git a/plenum/test/view_change/test_old_view_pre_prepare_reply_processing.py b/plenum/test/view_change/test_old_view_pre_prepare_reply_processing.py index 931ab89a4a..ab05600cfc 100644 --- a/plenum/test/view_change/test_old_view_pre_prepare_reply_processing.py +++ b/plenum/test/view_change/test_old_view_pre_prepare_reply_processing.py @@ -1,11 +1,11 @@ from plenum.common.messages.node_messages import OldViewPrePrepareReply, PrePrepare from plenum.server.consensus.ordering_service import OrderingService from plenum.test.delayers import msg_rep_delay, ppDelay, old_view_pp_request_delay -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.stasher import delay_rules, delay_rules_without_processing from plenum.test.view_change.helper import ensure_all_nodes_have_same_data from plenum.common.constants import PREPREPARE -from plenum.test.helper import sdk_send_random_and_check, waitForViewChange +from plenum.test.helper import vdr_send_random_and_check, waitForViewChange from plenum.test.view_change_service.helper import trigger_view_change from stp_core.common.log import getlogger @@ -18,8 +18,8 @@ def test_old_view_pre_prepare_reply_processing(looper, txnPoolNodeSet, tconf, - allPluginsPath, sdk_pool_handle, - sdk_wallet_steward, + allPluginsPath, vdr_pool_handle, + vdr_wallet_steward, monkeypatch): """ Test steps: @@ -39,13 +39,13 @@ def test_old_view_pre_prepare_reply_processing(looper, txnPoolNodeSet, tconf, ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=tconf.NEW_VIEW_TIMEOUT) timeout = waits.expectedPoolCatchupTime(nodeCount=len(txnPoolNodeSet)) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=timeout) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 1) with delay_rules_without_processing(slow_node.nodeIbStasher, ppDelay(), msg_rep_delay(types_to_delay=[PREPREPARE])): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 1) with 
delay_rules([n.nodeIbStasher for n in other_nodes], old_view_pp_request_delay()): old_sender = malicious_node.master_replica._ordering_service._send @@ -81,4 +81,4 @@ def chk(): looper.run(eventually(chk)) ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) \ No newline at end of file + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) \ No newline at end of file diff --git a/plenum/test/view_change/test_pp_seq_no_starts_from_1.py b/plenum/test/view_change/test_pp_seq_no_starts_from_1.py index 726c71ef58..b756b4b66e 100644 --- a/plenum/test/view_change/test_pp_seq_no_starts_from_1.py +++ b/plenum/test/view_change/test_pp_seq_no_starts_from_1.py @@ -1,7 +1,7 @@ import pytest from plenum.test.helper import checkViewNoForNodes from plenum.test.view_change.helper import ensure_view_change -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check # make sure that we send each reqeust individually to count pp_seq_no @@ -19,7 +19,7 @@ def reset(): def test_pp_seq_not_starts_from_0_in_new_view(tconf, txnPoolNodeSet, looper, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): # This test fails since last ordered pre-prepare sequence number is old_view_no = checkViewNoForNodes(txnPoolNodeSet) @@ -30,7 +30,7 @@ def chk(count): batches_count = 0 chk(batches_count) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 5) batches_count += 5 chk(batches_count) @@ -39,10 +39,10 @@ def chk(count): batches_count += 1 chk(batches_count) # After view_change, master primary must initiate 3pc batch - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) batches_count += 1 chk(batches_count) # new request for new view => last ordered 3PC is (0,2) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 5) batches_count += 5 chk(batches_count) diff --git a/plenum/test/view_change/test_pre_vc_strategy_3PC_msgs.py b/plenum/test/view_change/test_pre_vc_strategy_3PC_msgs.py index 029e2f9ecc..cd78901bd4 100644 --- a/plenum/test/view_change/test_pre_vc_strategy_3PC_msgs.py +++ b/plenum/test/view_change/test_pre_vc_strategy_3PC_msgs.py @@ -4,8 +4,8 @@ from plenum.common.util import get_utc_epoch -def test_accept_all_3PC_msgs(create_node_and_not_start, looper): - node = create_node_and_not_start +def test_accept_all_3PC_msgs(vdr_create_node_and_not_start, looper): + node = vdr_create_node_and_not_start preprepare = PrePrepare( 0, 0, diff --git a/plenum/test/view_change/test_primary_send_incorrect_pp.py b/plenum/test/view_change/test_primary_send_incorrect_pp.py index 883a282945..72b7cefcd4 100644 --- a/plenum/test/view_change/test_primary_send_incorrect_pp.py +++ b/plenum/test/view_change/test_primary_send_incorrect_pp.py @@ -3,12 +3,12 @@ from plenum.common.messages.node_messages import PrePrepare from plenum.server.consensus.ordering_service import OrderingService from plenum.test.delayers import msg_rep_delay -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import 
vdr_ensure_pool_functional from plenum.test.stasher import delay_rules from plenum.test.view_change.helper import ensure_all_nodes_have_same_data from plenum.common.constants import PREPREPARE -from plenum.test.helper import sdk_send_random_and_check, waitForViewChange, sdk_send_random_request, \ - sdk_get_and_check_replies +from plenum.test.helper import vdr_send_random_and_check, waitForViewChange, vdr_send_random_request, \ + vdr_get_and_check_replies from plenum.test.view_change_service.helper import trigger_view_change from stp_core.common.log import getlogger @@ -20,8 +20,8 @@ def test_primary_send_incorrect_pp(looper, txnPoolNodeSet, tconf, - allPluginsPath, sdk_pool_handle, - sdk_wallet_steward, + allPluginsPath, vdr_pool_handle, + vdr_wallet_steward, monkeypatch): """ Test steps: @@ -38,8 +38,8 @@ def test_primary_send_incorrect_pp(looper, txnPoolNodeSet, tconf, other_nodes = [n for n in txnPoolNodeSet if n not in [slow_node, malicious_primary]] timeout = waits.expectedPoolCatchupTime(nodeCount=len(txnPoolNodeSet)) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=timeout) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 1) old_sender = malicious_primary.master_replica._ordering_service._send def patched_sender(msg, dst=None, stat=None): @@ -60,7 +60,7 @@ def patched_sender(msg, dst=None, stat=None): with delay_rules(slow_node.nodeIbStasher, msg_rep_delay(types_to_delay=[PREPREPARE])): preprepare_process_num = slow_node.master_replica._ordering_service.spylog.count( OrderingService.process_preprepare) - resp_task = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_steward) + resp_task = vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_steward) def chk(): assert preprepare_process_num + 1 == slow_node.master_replica._ordering_service.spylog.count( @@ -68,9 +68,9 @@ def chk(): looper.run(eventually(chk)) - _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0] - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 1) + _, j_resp = vdr_get_and_check_replies(looper, [resp_task])[0] + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 1) trigger_view_change(txnPoolNodeSet) ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) @@ -79,4 +79,4 @@ def chk(): ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet, instances_list=[0, 1]) ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) diff --git a/plenum/test/view_change/test_queueing_req_from_future_view.py b/plenum/test/view_change/test_queueing_req_from_future_view.py index e2006d0efa..260cba7cad 100644 --- a/plenum/test/view_change/test_queueing_req_from_future_view.py +++ b/plenum/test/view_change/test_queueing_req_from_future_view.py @@ -3,8 +3,8 @@ from stp_core.loop.eventually import eventually from stp_core.common.log import getlogger from plenum.test.delayers import icDelay, vc_delay -from plenum.test.helper import sdk_send_random_requests, \ - sdk_get_replies, sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_requests, \ + vdr_get_replies, vdr_send_random_and_check from plenum.test.test_node import get_last_master_non_primary_node nodeCount = 7 @@ -14,7 
+14,7 @@ # noinspection PyIncorrectDocstring def testQueueingReqFromFutureView(delayed_perf_chk, looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): """ Test if every node queues 3 Phase requests(PRE-PREPARE, PREPARE and COMMIT) that come from a view which is greater than the current view. @@ -51,13 +51,13 @@ def chk_fut_view(is_empty): [lagging_node]) # send more requests that will be queued for the lagged node - reqs = sdk_send_random_requests(looper, sdk_pool_handle, - sdk_wallet_client, 5) + reqs = vdr_send_random_requests(looper, vdr_pool_handle, + vdr_wallet_client, 5) l = looper.run(eventually(chk_fut_view, False, retryWait=1)) logger.debug('{} has {} messages for future views' .format(lagging_node, l)) - sdk_get_replies(looper, reqs) + vdr_get_replies(looper, reqs) # reset delays for the lagging_node node so that it finally makes view # change lagging_node.reset_delays_and_process_delayeds() @@ -68,5 +68,5 @@ def chk_fut_view(is_empty): logger.debug('{} exhausted pending messages for future views' .format(lagging_node)) - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 2) diff --git a/plenum/test/view_change/test_re_order_pre_prepares.py b/plenum/test/view_change/test_re_order_pre_prepares.py index 4226b9b22b..70f8881e92 100644 --- a/plenum/test/view_change/test_re_order_pre_prepares.py +++ b/plenum/test/view_change/test_re_order_pre_prepares.py @@ -4,9 +4,9 @@ from plenum.common.messages.internal_messages import ViewChangeStarted, NewViewCheckpointsApplied from plenum.server.consensus.utils import preprepare_to_batch_id from plenum.test.delayers import cDelay, pDelay -from plenum.test.helper import sdk_send_random_and_check, max_3pc_batch_limits +from plenum.test.helper import vdr_send_random_and_check, max_3pc_batch_limits from plenum.test.node_catchup.helper import waitNodeDataEquality -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.stasher import delay_rules_without_processing @@ -17,14 +17,14 @@ def tconf(tconf): def test_re_order_pre_prepares(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): # 1. drop Prepares and Commits on 4thNode # Order a couple of requests on Nodes 1-3 lagging_node = txnPoolNodeSet[-1] other_nodes = txnPoolNodeSet[:-1] with delay_rules_without_processing(lagging_node.nodeIbStasher, cDelay(), pDelay()): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 3) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 3) assert all(n.master_last_ordered_3PC == (0, 3) for n in other_nodes) # 2. 
simulate view change start so that @@ -61,5 +61,5 @@ def test_re_order_pre_prepares(looper, txnPoolNodeSet, waitNodeDataEquality(looper, lagging_node, *other_nodes) assert lagging_node.master_last_ordered_3PC == (0, 4) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/view_change/test_re_order_pre_prepares_for_lagged.py b/plenum/test/view_change/test_re_order_pre_prepares_for_lagged.py index f73d3f367a..536868dbdd 100644 --- a/plenum/test/view_change/test_re_order_pre_prepares_for_lagged.py +++ b/plenum/test/view_change/test_re_order_pre_prepares_for_lagged.py @@ -5,9 +5,9 @@ from plenum.common.messages.node_messages import NewView from plenum.server.consensus.utils import preprepare_to_batch_id from plenum.test.delayers import delay_3pc, msg_rep_delay -from plenum.test.helper import sdk_send_random_and_check, max_3pc_batch_limits +from plenum.test.helper import vdr_send_random_and_check, max_3pc_batch_limits from plenum.test.node_catchup.helper import waitNodeDataEquality -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.stasher import delay_rules_without_processing @@ -18,14 +18,14 @@ def tconf(tconf): def test_re_order_pre_prepares_no_pre_prepares(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle): + vdr_wallet_client, vdr_pool_handle): # 1. drop PrePrepars, Prepares and Commits on 4thNode # Order a couple of requests on Nodes 1-3 lagging_node = txnPoolNodeSet[-1] other_nodes = txnPoolNodeSet[:-1] with delay_rules_without_processing(lagging_node.nodeIbStasher, delay_3pc()): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 3) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 3) assert all(n.master_last_ordered_3PC == (0, 3) for n in other_nodes) with delay_rules_without_processing(lagging_node.nodeIbStasher, @@ -73,4 +73,4 @@ def test_re_order_pre_prepares_no_pre_prepares(looper, txnPoolNodeSet, waitNodeDataEquality(looper, lagging_node, *other_nodes, customTimeout=60) assert lagging_node.master_last_ordered_3PC == (0, 4) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) \ No newline at end of file + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) \ No newline at end of file diff --git a/plenum/test/view_change/test_resend_inst_ch_in_progress_v_ch.py b/plenum/test/view_change/test_resend_inst_ch_in_progress_v_ch.py index 8a453bd6fd..de3731e16f 100644 --- a/plenum/test/view_change/test_resend_inst_ch_in_progress_v_ch.py +++ b/plenum/test/view_change/test_resend_inst_ch_in_progress_v_ch.py @@ -1,7 +1,7 @@ import pytest from stp_core.loop.eventually import eventually -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.stasher import delay_rules_without_processing from plenum.test.delayers import icDelay @@ -24,8 +24,8 @@ def tconf(tconf): tconf.STATE_FRESHNESS_UPDATE_INTERVAL = old_value_freshness -def test_resend_inst_ch_in_progress_v_ch(txnPoolNodeSet, looper, sdk_pool_handle, - sdk_wallet_client, tdir, tconf, allPluginsPath): +def test_resend_inst_ch_in_progress_v_ch(txnPoolNodeSet, looper, vdr_pool_handle, + vdr_wallet_client, tdir, tconf, 
allPluginsPath): old_view = viewNoForNodes(txnPoolNodeSet) # disconnect two nodes. One of them should be next master primary in case of view change. @@ -47,4 +47,4 @@ def checks(): looper.run(eventually(checks, timeout=tconf.NEW_VIEW_TIMEOUT * 2.5, retryWait=1)) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/view_change/test_reset_monitor_after_view_change.py b/plenum/test/view_change/test_reset_monitor_after_view_change.py index bf0a78659f..213abd5223 100644 --- a/plenum/test/view_change/test_reset_monitor_after_view_change.py +++ b/plenum/test/view_change/test_reset_monitor_after_view_change.py @@ -2,8 +2,8 @@ from plenum.test.testing_utils import FakeSomething -def test_reset_monitor_after_view_change_events(create_node_and_not_start): - node = create_node_and_not_start +def test_reset_monitor_after_view_change_events(vdr_create_node_and_not_start): + node = vdr_create_node_and_not_start node.view_changer = FakeSomething(propagate_primary=False, view_no=1) diff --git a/plenum/test/view_change/test_reverted_unordered.py b/plenum/test/view_change/test_reverted_unordered.py index 6d85573d01..cb0dc8d0aa 100644 --- a/plenum/test/view_change/test_reverted_unordered.py +++ b/plenum/test/view_change/test_reverted_unordered.py @@ -6,13 +6,13 @@ from plenum.common.messages.node_messages import Commit from plenum.common.util import check_if_all_equal_in_list from plenum.test.delayers import cDelay, msg_rep_delay, lsDelay, cr_delay -from plenum.test.helper import sdk_send_batches_of_random_and_check +from plenum.test.helper import vdr_send_batches_of_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.test_node import getNonPrimaryReplicas, ensureElectionsDone from plenum.test.view_change.helper import ensure_view_change -def test_reverted_unordered(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): +def test_reverted_unordered(txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): """ Before starting catchup, revert any uncommitted changes to state and ledger. 
This is to avoid any re-application of requests that were @@ -43,8 +43,8 @@ def test_reverted_unordered(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_ fast_nodes = [n for n in txnPoolNodeSet if n != slow_node] slow_node.nodeIbStasher.delay(cDelay(120, 0)) sent_batches = 5 - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 2 * sent_batches, sent_batches) + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 2 * sent_batches, sent_batches) # Fast nodes have same last ordered and same data last_ordered = [n.master_last_ordered_3PC for n in fast_nodes] @@ -87,6 +87,6 @@ def chk2(): looper.run(eventually(chk2, retryWait=1)) # Ensure pool is functional - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 10, 2) + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 10, 2) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/view_change/test_select_primary_after_removed_backup.py b/plenum/test/view_change/test_select_primary_after_removed_backup.py index c093c03011..1d00333be7 100644 --- a/plenum/test/view_change/test_select_primary_after_removed_backup.py +++ b/plenum/test/view_change/test_select_primary_after_removed_backup.py @@ -6,8 +6,8 @@ def test_select_primary_after_removed_backup(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): """ Check correct order of primaries on backup replicas """ diff --git a/plenum/test/view_change/test_start_view_change_by_vc_msgs.py b/plenum/test/view_change/test_start_view_change_by_vc_msgs.py index 2c33d72e02..1ba1a6f2f1 100644 --- a/plenum/test/view_change/test_start_view_change_by_vc_msgs.py +++ b/plenum/test/view_change/test_start_view_change_by_vc_msgs.py @@ -1,6 +1,6 @@ from plenum.test.delayers import icDelay from plenum.test.helper import checkViewNoForNodes -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.stasher import delay_rules_without_processing from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change_service.helper import get_next_primary_name, trigger_view_change @@ -9,8 +9,8 @@ def test_start_view_change_by_vc_msgs(looper, txnPoolNodeSet, - sdk_wallet_client, - sdk_pool_handle): + vdr_wallet_client, + vdr_pool_handle): delayed_node = txnPoolNodeSet[-1] rest_nodes = txnPoolNodeSet[:-1] @@ -19,13 +19,13 @@ def test_start_view_change_by_vc_msgs(looper, trigger_view_change(txnPoolNodeSet) looper.run(eventually(checkViewNoForNodes, rest_nodes, current_view_no + 1)) ensureElectionsDone(looper, txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) def test_delay_IC_for_next_primary(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): current_view_no = checkViewNoForNodes(txnPoolNodeSet) next_primary_name = get_next_primary_name(txnPoolNodeSet, current_view_no + 1) next_primary = [n for n in txnPoolNodeSet if n.name == next_primary_name][0] @@ -34,5 +34,5 @@ def test_delay_IC_for_next_primary(looper, trigger_view_change(txnPoolNodeSet) looper.run(eventually(checkViewNoForNodes, rest_nodes, current_view_no + 1)) ensureElectionsDone(looper, 
txnPoolNodeSet)
-    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
+    vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle)
     assert next_primary.master_replica.isPrimary
diff --git a/plenum/test/view_change/test_unstash_waiting_for_first_batch_order.py b/plenum/test/view_change/test_unstash_waiting_for_first_batch_order.py
index 2af23c5836..7f9a2a8039 100644
--- a/plenum/test/view_change/test_unstash_waiting_for_first_batch_order.py
+++ b/plenum/test/view_change/test_unstash_waiting_for_first_batch_order.py
@@ -1,7 +1,7 @@
 import pytest
 from plenum.test.delayers import cDelay
-from plenum.test.helper import sdk_send_random_and_check
+from plenum.test.helper import vdr_send_random_and_check
 from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
 from plenum.test.stasher import delay_rules
 from plenum.test.test_node import ensureElectionsDone, getRequiredInstances
@@ -25,20 +25,20 @@ def tconf(tconf):
 def test_unstash_waiting_for_first_batch_ordered(
-        looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, tconf):
+        looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle, tconf):
     lagged_node = txnPoolNodeSet[-1]
     other_nodes = list(set(txnPoolNodeSet) - {lagged_node})
-    sdk_send_random_and_check(looper, txnPoolNodeSet,
-                              sdk_pool_handle, sdk_wallet_client, 1)
+    vdr_send_random_and_check(looper, txnPoolNodeSet,
+                              vdr_pool_handle, vdr_wallet_client, 1)
     with delay_rules(lagged_node.nodeIbStasher, cDelay()):
         ensure_view_change(looper, txnPoolNodeSet)
         ensureElectionsDone(looper, other_nodes,
                             instances_list=range(getRequiredInstances(len(txnPoolNodeSet))))
-        sdk_send_random_and_check(looper, txnPoolNodeSet,
-                                  sdk_pool_handle, sdk_wallet_client, 2)
+        vdr_send_random_and_check(looper, txnPoolNodeSet,
+                                  vdr_pool_handle, vdr_wallet_client, 2)
     ensureElectionsDone(looper, txnPoolNodeSet)
     ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
diff --git a/plenum/test/view_change/test_unstash_waiting_for_first_batch_ordered_after_catchup.py b/plenum/test/view_change/test_unstash_waiting_for_first_batch_ordered_after_catchup.py
index ec72cc8714..402b73c531 100644
--- a/plenum/test/view_change/test_unstash_waiting_for_first_batch_ordered_after_catchup.py
+++ b/plenum/test/view_change/test_unstash_waiting_for_first_batch_ordered_after_catchup.py
@@ -2,7 +2,7 @@
 from plenum.common.constants import PREPARE, PREPREPARE
 from plenum.test.delayers import cDelay, msg_rep_delay
-from plenum.test.helper import sdk_send_random_and_check, assertExp, sdk_send_random_requests
+from plenum.test.helper import vdr_send_random_and_check, assertExp, vdr_send_random_requests
 from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
 from plenum.test.propagate.helper import recvdPrePrepareForInstId
 from plenum.test.stasher import delay_rules, delay_rules_without_processing
@@ -29,13 +29,13 @@ def tconf(tconf):
 def test_unstash_waiting_for_first_batch_ordered_after_catchup(
-        looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, tconf):
+        looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle, tconf):
     lagged_node = txnPoolNodeSet[-1]
     other_nodes = list(set(txnPoolNodeSet) - {lagged_node})
     other_stashers = [n.nodeIbStasher for n in other_nodes]
-    sdk_send_random_and_check(looper, txnPoolNodeSet,
-                              sdk_pool_handle, sdk_wallet_client, 1)
+    vdr_send_random_and_check(looper, txnPoolNodeSet,
+                              vdr_pool_handle, vdr_wallet_client, 1)
     last_ordered_lagged_before = lagged_node.master_last_ordered_3PC
     # do not process any message reqs for PrePrepares
@@ -46,13 +46,13 @@ def test_unstash_waiting_for_first_batch_ordered_after_catchup(
         ensureElectionsDone(looper, other_nodes,
                             instances_list=range(getRequiredInstances(len(txnPoolNodeSet))))
-    sdk_send_random_and_check(looper, txnPoolNodeSet,
-                              sdk_pool_handle, sdk_wallet_client, 1)
+    vdr_send_random_and_check(looper, txnPoolNodeSet,
+                              vdr_pool_handle, vdr_wallet_client, 1)
     # delay Commits on all nodes so that there are some PrePrepares still stashed after catchup
     with delay_rules(other_stashers, cDelay()):
         pre_prep_before = len(recvdPrePrepareForInstId(lagged_node, 0))
-        sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 2)
+        vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 2)
         # wait till lagged node recives the new PrePrepares
         # they will be stashed as WAITING_FIRST_BATCH_IN_VIEW
         looper.run(
@@ -64,8 +64,8 @@ def test_unstash_waiting_for_first_batch_ordered_after_catchup(
     looper.run(
         eventually(lambda: assertExp(lagged_node.master_last_ordered_3PC > last_ordered_lagged_before)))
-    sdk_send_random_and_check(looper, txnPoolNodeSet,
-                              sdk_pool_handle, sdk_wallet_client, 2)
+    vdr_send_random_and_check(looper, txnPoolNodeSet,
+                              vdr_pool_handle, vdr_wallet_client, 2)
     ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)
     ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=30)
diff --git a/plenum/test/view_change/test_vc_finished_when_less_than_quorum_started.py b/plenum/test/view_change/test_vc_finished_when_less_than_quorum_started.py
index f885a6b23b..2c7139869f 100644
--- a/plenum/test/view_change/test_vc_finished_when_less_than_quorum_started.py
+++ b/plenum/test/view_change/test_vc_finished_when_less_than_quorum_started.py
@@ -1,6 +1,6 @@
 import pytest
-from plenum.test.helper import sdk_send_random_and_check, waitForViewChange
+from plenum.test.helper import vdr_send_random_and_check, waitForViewChange
 from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
 from plenum.test.test_node import ensureElectionsDone
 from plenum.test.view_change.helper import restart_node, nodes_received_ic
@@ -17,7 +17,7 @@ def tconf(tconf):
 def test_vc_finished_when_less_than_quorum_started(looper, txnPoolNodeSet,
-                                                   sdk_wallet_client, sdk_pool_handle,
+                                                   vdr_wallet_client, vdr_pool_handle,
                                                    tconf, tdir, allPluginsPath):
     alpha, beta, gamma, delta = txnPoolNodeSet
@@ -37,8 +37,8 @@ def test_vc_finished_when_less_than_quorum_started(looper, txnPoolNodeSet,
         send_test_instance_change(beta)
     # Ensure that pool is still functional
-    sdk_send_random_and_check(looper, txnPoolNodeSet,
-                              sdk_pool_handle, sdk_wallet_client, 1)
+    vdr_send_random_and_check(looper, txnPoolNodeSet,
+                              vdr_pool_handle, vdr_wallet_client, 1)
     # Alpha and Gamma send InstanceChange for all nodes.
for node in [gamma, alpha]: @@ -49,6 +49,6 @@ def test_vc_finished_when_less_than_quorum_started(looper, txnPoolNodeSet, customTimeout=tconf.NEW_VIEW_TIMEOUT) # Ensure that pool is still functional - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/view_change/test_vc_started_in_different_time.py b/plenum/test/view_change/test_vc_started_in_different_time.py index 709be38d6d..f8fa3e960a 100644 --- a/plenum/test/view_change/test_vc_started_in_different_time.py +++ b/plenum/test/view_change/test_vc_started_in_different_time.py @@ -1,6 +1,6 @@ import pytest -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.view_change.helper import restart_node, nodes_received_ic from plenum.test.view_change_service.helper import send_test_instance_change @@ -16,7 +16,7 @@ def tconf(tconf): def test_vc_started_in_different_time(looper, txnPoolNodeSet, - sdk_wallet_client, sdk_pool_handle, + vdr_wallet_client, vdr_pool_handle, tconf, tdir, allPluginsPath): alpha, beta, gamma, delta = txnPoolNodeSet @@ -36,8 +36,8 @@ def test_vc_started_in_different_time(looper, txnPoolNodeSet, send_test_instance_change(beta) # Ensure that pool is still functional - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) # Restart Alpha, Beta for i, node in enumerate([alpha, beta]): @@ -50,6 +50,6 @@ def test_vc_started_in_different_time(looper, txnPoolNodeSet, send_test_instance_change(node) # Ensure that pool is still functional - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_client, 1) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/view_change/test_vc_with_incorrect_primary_in_promote.py b/plenum/test/view_change/test_vc_with_incorrect_primary_in_promote.py index db6992013e..574e2c606c 100644 --- a/plenum/test/view_change/test_vc_with_incorrect_primary_in_promote.py +++ b/plenum/test/view_change/test_vc_with_incorrect_primary_in_promote.py @@ -1,10 +1,10 @@ import pytest from plenum.test.delayers import cDelay, ppDelay, pDelay, icDelay, msg_rep_delay, vc_delay, nv_delay -from plenum.test.helper import waitForViewChange, checkViewNoForNodes, sdk_send_random_and_check +from plenum.test.helper import waitForViewChange, checkViewNoForNodes, vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional -from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node +from plenum.test.node_request.helper import vdr_ensure_pool_functional +from plenum.test.pool_transactions.helper import vdr_add_new_steward_and_node from plenum.test.stasher import delay_rules_without_processing from plenum.test.test_node import checkNodesConnected, ensureElectionsDone from plenum.test.view_change_service.helper import trigger_view_change @@ -33,8 +33,8 @@ def tconf(tconf): tconf.LOG_SIZE = old_log_size -def test_finish_view_change_with_incorrect_primaries_list(looper, 
txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, tdir, tconf, allPluginsPath): +def test_finish_view_change_with_incorrect_primaries_list(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, tdir, tconf, allPluginsPath): """ This test imitates situation when one of nodes is lagged. It missed txn for adding new node and view_change after this. @@ -70,7 +70,7 @@ def complete_vc(node): pDelay()): # Add new node and this action should starts view_change because of NODE txn ordered - _, theta = sdk_add_new_steward_and_node(looper, sdk_pool_handle, sdk_wallet_steward, + _, theta = vdr_add_new_steward_and_node(looper, vdr_pool_handle, vdr_wallet_steward, 'Theta_Steward', 'Theta', tdir, tconf, allPluginsPath=allPluginsPath) txnPoolNodeSet.append(theta) @@ -96,6 +96,6 @@ def complete_vc(node): # We assume that after 2 Checkpoints receiving lagged node will start catchup and elect right primaries - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 2 * CHK_SIZE) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_steward, 2 * CHK_SIZE) ensureElectionsDone(looper, txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) diff --git a/plenum/test/view_change/test_view_change.py b/plenum/test/view_change/test_view_change.py index 24768207de..4c71af20c0 100644 --- a/plenum/test/view_change/test_view_change.py +++ b/plenum/test/view_change/test_view_change.py @@ -1,4 +1,4 @@ -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.spy_helpers import get_count from plenum.test.test_node import ensureElectionsDone @@ -19,13 +19,13 @@ def test_view_change_on_empty_ledger(txnPoolNodeSet, looper): # noinspection PyIncorrectDocstring def test_view_change_after_some_txns(looper, txnPoolNodeSet, viewNo, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): """ Check that view change is done after processing some of txns """ - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 3) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 3) ensure_view_change(looper, txnPoolNodeSet) ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet) @@ -34,19 +34,19 @@ def test_view_change_after_some_txns(looper, txnPoolNodeSet, viewNo, # noinspection PyIncorrectDocstring def test_send_more_after_view_change(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): """ Check that we can send more requests after view change """ - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 4) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 4) ensure_view_change(looper, txnPoolNodeSet) ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet) ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 10) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 10) def test_node_notified_about_primary_election_result(txnPoolNodeSet, looper): diff --git 
a/plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary.py b/plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary.py index e8b0df64f0..84ca4ad5cd 100644 --- a/plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary.py +++ b/plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary.py @@ -1,6 +1,6 @@ import pytest -from plenum.test.helper import checkViewNoForNodes, waitForViewChange, sdk_send_random_and_check, view_change_timeout +from plenum.test.helper import checkViewNoForNodes, waitForViewChange, vdr_send_random_and_check, view_change_timeout from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected from plenum.test.test_node import get_master_primary_node @@ -15,8 +15,8 @@ def tconf(tconf): def test_view_change_after_back_to_quorum_with_disconnected_primary(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tdir, tconf, allPluginsPath): assert len(txnPoolNodeSet) == 4 @@ -77,6 +77,6 @@ def test_view_change_after_back_to_quorum_with_disconnected_primary(txnPoolNodeS customTimeout=3 * tconf.NEW_VIEW_TIMEOUT) # 6. ensure pool is working properly - sdk_send_random_and_check(looper, remaining_nodes, sdk_pool_handle, - sdk_wallet_client, 3) + vdr_send_random_and_check(looper, remaining_nodes, vdr_pool_handle, + vdr_wallet_client, 3) ensure_all_nodes_have_same_data(looper, nodes=remaining_nodes) diff --git a/plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary_and_slow_node.py b/plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary_and_slow_node.py index 02c731320e..443bca0236 100644 --- a/plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary_and_slow_node.py +++ b/plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary_and_slow_node.py @@ -2,7 +2,7 @@ from plenum.common.constants import LEDGER_STATUS from plenum.test.delayers import msg_rep_delay -from plenum.test.helper import checkViewNoForNodes, waitForViewChange, sdk_send_random_and_check, view_change_timeout +from plenum.test.helper import checkViewNoForNodes, waitForViewChange, vdr_send_random_and_check, view_change_timeout from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected from plenum.test.stasher import delay_rules @@ -20,8 +20,8 @@ def tconf(tconf): def test_view_change_after_back_to_quorum_with_disconnected_primary(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tdir, tconf, allPluginsPath): assert len(txnPoolNodeSet) == 4 @@ -89,6 +89,6 @@ def test_view_change_after_back_to_quorum_with_disconnected_primary(txnPoolNodeS customTimeout=3 * tconf.NEW_VIEW_TIMEOUT) # 7. 
ensure pool is working properly - sdk_send_random_and_check(looper, remaining_nodes, sdk_pool_handle, - sdk_wallet_client, 3) + vdr_send_random_and_check(looper, remaining_nodes, vdr_pool_handle, + vdr_wallet_client, 3) ensure_all_nodes_have_same_data(looper, nodes=remaining_nodes) diff --git a/plenum/test/view_change/test_view_change_done_delayed.py b/plenum/test/view_change/test_view_change_done_delayed.py index a7b1460be0..31ae401c3f 100644 --- a/plenum/test/view_change/test_view_change_done_delayed.py +++ b/plenum/test/view_change/test_view_change_done_delayed.py @@ -1,7 +1,7 @@ import pytest from plenum.test.delayers import delay_3pc_messages, nv_delay -from plenum.test.helper import sdk_send_batches_of_random_and_check, sdk_send_random_and_check +from plenum.test.helper import vdr_send_batches_of_random_and_check, vdr_send_random_and_check from plenum.test.node_catchup.helper import waitNodeDataEquality, \ ensure_all_nodes_have_same_data from plenum.test.test_node import getNonPrimaryReplicas @@ -9,7 +9,7 @@ from stp_core.loop.eventually import eventually -def test_view_change_done_delayed(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): +def test_view_change_done_delayed(txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): """ A node is slow so is behind other nodes, after view change, it catches up but it also gets view change message as delayed, a node should start @@ -27,8 +27,8 @@ def chk(node): assert node.isParticipating assert None not in {r.isPrimary for r in node.replicas.values()} - sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 5 * 4, 4) + vdr_send_batches_of_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 5 * 4, 4) ensure_view_change(looper, nodes=txnPoolNodeSet) @@ -45,7 +45,7 @@ def chk(node): assert all(slow_node.viewNo == node.viewNo for node in other_nodes) # Send requests to make sure pool is functional - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 5) # Repair network slow_node.reset_delays_and_process_delayeds() @@ -57,5 +57,5 @@ def chk(node): waitNodeDataEquality(looper, slow_node, *other_nodes) # Send more requests and compare data of all nodes - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 5) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/view_change/test_view_change_on_master_degraded.py b/plenum/test/view_change/test_view_change_on_master_degraded.py index fc8abb2cce..7e16a352ef 100644 --- a/plenum/test/view_change/test_view_change_on_master_degraded.py +++ b/plenum/test/view_change/test_view_change_on_master_degraded.py @@ -5,7 +5,7 @@ from plenum.common.throughput_measurements import RevivalSpikeResistantEMAThroughputMeasurement from plenum.test.delayers import delayNonPrimaries from plenum.test.helper import waitForViewChange, \ - sdk_send_random_and_check, assertExp + vdr_send_random_and_check, assertExp from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data, waitNodeDataEquality from plenum.test.test_node import get_master_primary_node, getPrimaryReplica, \ ensureElectionsDone @@ -46,8 +46,8 @@ def tconf(tconf): def test_view_change_on_performance_degraded(looper, txnPoolNodeSet, viewNo, - sdk_pool_handle, - sdk_wallet_steward): + 
vdr_pool_handle, + vdr_wallet_steward): """ Test that a view change is done when the performance of master goes down Send multiple requests from the client and delay some requests by master @@ -68,8 +68,8 @@ def test_view_change_on_performance_degraded(looper, txnPoolNodeSet, viewNo, def test_view_change_on_quorum_of_master_degraded(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, viewNo): """ Node will change view even though it does not find the master to be degraded @@ -96,8 +96,8 @@ def test_view_change_on_quorum_of_master_degraded(txnPoolNodeSet, looper, backup_replica = txnPoolNodeSet[0].replicas[1] backup_last_ordered_before = backup_replica.last_ordered_3pc - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_steward, 4) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_steward, 4) # make sure that backups also ordered at least 1 batch to be able to track performance degradation looper.run(eventually(lambda: assertExp(backup_replica.last_ordered_3pc > backup_last_ordered_before))) diff --git a/plenum/test/view_change/test_view_change_timeout.py b/plenum/test/view_change/test_view_change_timeout.py index 888641ef64..1fc3d60736 100644 --- a/plenum/test/view_change/test_view_change_timeout.py +++ b/plenum/test/view_change/test_view_change_timeout.py @@ -3,7 +3,7 @@ from plenum.test.delayers import nv_delay from plenum.test.stasher import delay_rules from plenum.test.helper import waitForViewChange, perf_monitor_disabled, view_change_timeout -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.spy_helpers import get_count, getAllReturnVals from plenum.test.test_node import get_master_primary_node, \ @@ -74,7 +74,7 @@ def setup(txnPoolNodeSet, looper): @pytest.mark.skip(reason="INDY-2244 will be fixed in the scope clean-up work") def test_view_change_retry_by_timeout( - txnPoolNodeSet, looper, tconf, setup, sdk_pool_handle, sdk_wallet_client): + txnPoolNodeSet, looper, tconf, setup, vdr_pool_handle, vdr_wallet_client): """ Verifies that a view change is restarted if it is not completed in time """ @@ -104,15 +104,15 @@ def test_view_change_retry_by_timeout( for node in txnPoolNodeSet: assert node.viewNo - initial_view_no == 2 - sdk_ensure_pool_functional(looper, txnPoolNodeSet, - sdk_wallet_client, - sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, + vdr_wallet_client, + vdr_pool_handle) @pytest.mark.skip(reason="INDY-2244 will be fixed in the scope clean-up work") def test_multiple_view_change_retries_by_timeouts( txnPoolNodeSet, looper, tconf, setup, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): """ Verifies that a view change is restarted each time when the previous one is timed out @@ -141,9 +141,9 @@ def test_multiple_view_change_retries_by_timeouts( for node in txnPoolNodeSet: assert node.viewNo - initial_view_no == 4 - sdk_ensure_pool_functional(looper, txnPoolNodeSet, - sdk_wallet_client, - sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, + vdr_wallet_client, + vdr_pool_handle) @pytest.mark.skip(reason="INDY-2244 will be fixed in the scope clean-up work") diff --git a/plenum/test/view_change/test_view_change_with_instance_change_lost_due_to_restarts.py 
b/plenum/test/view_change/test_view_change_with_instance_change_lost_due_to_restarts.py index 50e4998caf..44dd14ee86 100644 --- a/plenum/test/view_change/test_view_change_with_instance_change_lost_due_to_restarts.py +++ b/plenum/test/view_change/test_view_change_with_instance_change_lost_due_to_restarts.py @@ -1,7 +1,7 @@ import pytest from plenum.test.helper import freshness, waitForViewChange -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.restart.helper import restart_nodes from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change_service.helper import send_test_instance_change @@ -17,8 +17,8 @@ def tconf(tconf): def test_view_change_with_instance_change_lost_due_to_restarts(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf, tdir, allPluginsPath): """ 1. some_nodes (Beta and Gamma) send InstanceChange for all nodes. @@ -50,4 +50,4 @@ def check_ic_delivery(): waitForViewChange(looper, txnPoolNodeSet, current_view_no + 1, customTimeout=3 * FRESHNESS_TIMEOUT) ensureElectionsDone(looper, txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/view_change/test_view_change_with_lost_new_view.py b/plenum/test/view_change/test_view_change_with_lost_new_view.py index 3a66fea54f..50032a4db8 100644 --- a/plenum/test/view_change/test_view_change_with_lost_new_view.py +++ b/plenum/test/view_change/test_view_change_with_lost_new_view.py @@ -2,9 +2,9 @@ from plenum.test.test_node import ensureElectionsDone from plenum.common.messages.node_messages import NewView -from plenum.test.helper import sdk_send_random_and_check, waitForViewChange +from plenum.test.helper import vdr_send_random_and_check, waitForViewChange from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.view_change_service.helper import trigger_view_change call_count = 0 @@ -26,8 +26,8 @@ def lost_count(request): def test_view_change_with_lost_new_view(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, tconf, tdir, lost_count): @@ -39,8 +39,8 @@ def test_view_change_with_lost_new_view(txnPoolNodeSet, node_to_disconnect = txnPoolNodeSet[-1] initial_view_no = txnPoolNodeSet[0].viewNo - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_steward, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, + vdr_pool_handle, vdr_wallet_steward, 5) def unpatch_after_call(msg, frm): global call_count @@ -62,4 +62,4 @@ def unpatch_after_call(msg, frm): ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) # make sure that the pool is functional - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_steward, vdr_pool_handle) diff --git a/plenum/test/view_change/test_view_change_without_any_reqs.py b/plenum/test/view_change/test_view_change_without_any_reqs.py index eb045111cc..6a21631240 100644 --- a/plenum/test/view_change/test_view_change_without_any_reqs.py +++ b/plenum/test/view_change/test_view_change_without_any_reqs.py @@ 
-8,7 +8,7 @@ from plenum.common.constants import DOMAIN_LEDGER_ID from plenum.test.delayers import delay_3pc_messages, \ reset_delays_and_process_delayeds -from plenum.test.helper import sdk_send_random_and_check, waitForViewChange, sdk_send_random_requests +from plenum.test.helper import vdr_send_random_and_check, waitForViewChange, vdr_send_random_requests from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.test_node import get_master_primary_node @@ -42,7 +42,7 @@ def tconf(tconf): def test_view_change_on_start(tconf, txnPoolNodeSet, looper, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): """ Do view change on a without any requests """ @@ -52,7 +52,7 @@ def test_view_change_on_start(tconf, txnPoolNodeSet, looper, delay_3pc = 10 delay_3pc_messages(txnPoolNodeSet, 0, delay_3pc) sent_batches = 2 - sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, + vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, sent_batches * tconf.Max3PCBatchSize) def chk1(): @@ -70,6 +70,6 @@ def chk1(): check_uncommitteds_equal(txnPoolNodeSet) reset_delays_and_process_delayeds(txnPoolNodeSet) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 2 * Max3PCBatchSize, add_delay_to_timeout=delay_3pc) ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet) diff --git a/plenum/test/view_change/test_view_change_wont_happen_if_ic_is_discarded.py b/plenum/test/view_change/test_view_change_wont_happen_if_ic_is_discarded.py index c81f6e86c8..711e7f8699 100644 --- a/plenum/test/view_change/test_view_change_wont_happen_if_ic_is_discarded.py +++ b/plenum/test/view_change/test_view_change_wont_happen_if_ic_is_discarded.py @@ -16,8 +16,8 @@ def tconf(tconf): def test_view_change_not_happen_if_ic_is_discarded(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf, tdir, allPluginsPath): """ 1. panic_node (Delta) send InstanceChange for all nodes. 
diff --git a/plenum/test/view_change/test_view_changes_if_master_primary_disconnected.py b/plenum/test/view_change/test_view_changes_if_master_primary_disconnected.py index 6e2f9a9747..f316ebcddd 100644 --- a/plenum/test/view_change/test_view_changes_if_master_primary_disconnected.py +++ b/plenum/test/view_change/test_view_changes_if_master_primary_disconnected.py @@ -5,11 +5,11 @@ from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.test_node import get_master_primary_node, ensure_node_disconnected -from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check, waitForViewChange +from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check, waitForViewChange -def test_view_changes_if_master_primary_disconnected(txnPoolNodeSet, looper, sdk_pool_handle, - sdk_wallet_client, tdir, tconf, allPluginsPath): +def test_view_changes_if_master_primary_disconnected(txnPoolNodeSet, looper, vdr_pool_handle, + vdr_wallet_client, tdir, tconf, allPluginsPath): """ View change occurs when master's primary is disconnected """ @@ -40,7 +40,7 @@ def test_view_changes_if_master_primary_disconnected(txnPoolNodeSet, looper, sdk new_pr_node = get_master_primary_node(remaining_nodes) assert old_pr_node != new_pr_node - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 5) # Check if old primary can join the pool and still functions old_pr_node = start_stopped_node(old_pr_node, looper, tconf, diff --git a/plenum/test/view_change/test_view_not_changed.py b/plenum/test/view_change/test_view_not_changed.py index 6a0564f0ae..1006c67bdf 100644 --- a/plenum/test/view_change/test_view_not_changed.py +++ b/plenum/test/view_change/test_view_not_changed.py @@ -1,5 +1,5 @@ from plenum.common.util import getMaxFailures -from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check +from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check from plenum.test.delayers import ppDelay from plenum.test.test_node import TestReplica, getNonPrimaryReplicas @@ -8,7 +8,7 @@ # noinspection PyIncorrectDocstring -def test_view_not_changed(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): +def test_view_not_changed(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): """ Test that a view change is not done when the performance of master does not go down @@ -27,6 +27,6 @@ def test_view_not_changed(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_cl for r in nonPrimReps: r.node.nodeIbStasher.delay(ppDelay(10, i)) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 5) checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=0) diff --git a/plenum/test/view_change/test_view_not_changed_when_primary_disconnected_from_less_than_quorum.py b/plenum/test/view_change/test_view_not_changed_when_primary_disconnected_from_less_than_quorum.py index 52c08efe18..b81af920b1 100644 --- a/plenum/test/view_change/test_view_not_changed_when_primary_disconnected_from_less_than_quorum.py +++ b/plenum/test/view_change/test_view_not_changed_when_primary_disconnected_from_less_than_quorum.py @@ -6,7 +6,7 @@ from plenum.test.test_node import getNonPrimaryReplicas, get_master_primary_node from plenum.test.view_change.helper import node_received_instance_changes_count from stp_core.loop.eventually 
import eventually -from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check +from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check def node_primary_disconnected_calls(node): @@ -15,7 +15,7 @@ def node_primary_disconnected_calls(node): def test_view_not_changed_when_primary_disconnected_from_less_than_quorum( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): """ Less than quorum nodes lose connection with primary, this should not trigger view change as the protocol can move ahead @@ -62,7 +62,7 @@ def chk2(): looper.run(eventually(chk2, retryWait=1, timeout=10)) # Send some requests and make sure the request execute - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 5) # Partitioned node should have the same ledger and state as others as it gets reqs from all nodes waitNodeDataEquality(looper, partitioned_node, diff --git a/plenum/test/view_change/test_view_not_changed_when_short_disconnection.py b/plenum/test/view_change/test_view_not_changed_when_short_disconnection.py index a86c6b007f..83d11235eb 100644 --- a/plenum/test/view_change/test_view_not_changed_when_short_disconnection.py +++ b/plenum/test/view_change/test_view_not_changed_when_short_disconnection.py @@ -1,7 +1,7 @@ from plenum.test.view_change.helper import node_received_instance_changes_count from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected from stp_core.loop.eventually import eventually -from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check +from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check from plenum.test.test_node import get_master_primary_node from plenum.test.view_change.helper import start_stopped_node @@ -11,7 +11,7 @@ def node_primary_disconnect_count(node): return pcm_service.spylog.count(pcm_service._primary_disconnected) -def test_view_not_changed_when_short_disconnection(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client, +def test_view_not_changed_when_short_disconnection(txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client, tdir, tconf, allPluginsPath): """ When primary is disconnected but not long enough to trigger the timeout, @@ -52,4 +52,4 @@ def chk2(): assert checkViewNoForNodes(txnPoolNodeSet) == view_no # Send some requests and make sure the request execute - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 5) diff --git a/plenum/test/view_change_service/helper.py b/plenum/test/view_change_service/helper.py index 6c05dfa213..8b9b5b746f 100644 --- a/plenum/test/view_change_service/helper.py +++ b/plenum/test/view_change_service/helper.py @@ -2,10 +2,10 @@ from plenum.common.messages.internal_messages import NodeNeedViewChange, VoteForViewChange from plenum.server.suspicion_codes import Suspicions from plenum.test.delayers import cDelay, ppDelay, msg_rep_delay, old_view_pp_reply_delay, nv_delay -from plenum.test.helper import waitForViewChange, checkViewNoForNodes, sdk_send_random_and_check +from plenum.test.helper import waitForViewChange, checkViewNoForNodes, vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional 
-from plenum.test.pool_transactions.helper import sdk_add_new_nym, sdk_add_new_node +from plenum.test.node_request.helper import vdr_ensure_pool_functional +from plenum.test.pool_transactions.helper import vdr_add_new_nym, vdr_add_new_node from plenum.test.stasher import delay_rules_without_processing, delay_rules from plenum.test.test_node import ensureElectionsDone, getNonPrimaryReplicas, checkNodesConnected, TestNode from stp_core.loop.eventually import eventually @@ -52,7 +52,7 @@ def check_view_change_adding_new_node(looper, tdir, tconf, allPluginsPath, # add a new Steward before delaying. Otherwise the slow node may reject NODE client reqs # as it can not authenticate it due to lack of Steward txn applied - new_steward_wallet_handle = sdk_add_new_nym(looper, + new_steward_wallet_handle = vdr_add_new_nym(looper, sdk_pool_handle, sdk_wallet_steward, alias='New_Steward', @@ -63,7 +63,7 @@ def check_view_change_adding_new_node(looper, tdir, tconf, allPluginsPath, with delay_rules(all_stashers, nv_delay()): with delay_rules_without_processing(slow_stashers, *delayers): # Add Node5 - new_node = sdk_add_new_node( + new_node = vdr_add_new_node( looper, sdk_pool_handle, new_steward_wallet_handle, @@ -87,7 +87,7 @@ def check_view_change_adding_new_node(looper, tdir, tconf, allPluginsPath, waitForViewChange(looper, old_set, 4) ensureElectionsDone(looper, old_set) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) def check_has_commits(nodes): @@ -119,7 +119,7 @@ def check_view_change_one_slow_node(looper, txnPoolNodeSet, sdk_pool_handle, sdk # delay OldViewPrePrepareReply so that slow node doesn't receive PrePrepares before ReOrdering phase finishes with delay_rules(delayed_node.nodeIbStasher, old_view_pp_reply_delay()): with delay_rules_without_processing(delayed_node.nodeIbStasher, *delayers): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) trigger_view_change(txnPoolNodeSet) if vc_counts == 2: for node in txnPoolNodeSet: @@ -132,5 +132,5 @@ def check_view_change_one_slow_node(looper, txnPoolNodeSet, sdk_pool_handle, sdk ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/view_change_service/test_delay_commits.py b/plenum/test/view_change_service/test_delay_commits.py index 4405071e2f..86a73c17c6 100644 --- a/plenum/test/view_change_service/test_delay_commits.py +++ b/plenum/test/view_change_service/test_delay_commits.py @@ -15,11 +15,11 @@ def slow_node_is_next_primary(request): def test_delay_commits_for_one_node(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, slow_node_is_next_primary, vc_counts): - check_view_change_one_slow_node(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + check_view_change_one_slow_node(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, vc_counts=vc_counts, slow_node_is_next_primary=slow_node_is_next_primary, delay_commit=True, delay_pre_prepare=False) diff --git a/plenum/test/view_change_service/test_delay_commits_with_node_disconnection.py 
b/plenum/test/view_change_service/test_delay_commits_with_node_disconnection.py index 933395f528..a0d1a0d26f 100644 --- a/plenum/test/view_change_service/test_delay_commits_with_node_disconnection.py +++ b/plenum/test/view_change_service/test_delay_commits_with_node_disconnection.py @@ -1,9 +1,9 @@ import pytest from plenum.test.delayers import cDelay -from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check +from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected from plenum.test.stasher import delay_rules_without_processing from plenum.test.test_node import getNonPrimaryReplicas, ensureElectionsDone @@ -11,7 +11,7 @@ def test_view_change_with_next_primary_stopped_and_one_node_lost_commit(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client, limitTestRunningTime): current_view_no = checkViewNoForNodes(txnPoolNodeSet) next_primary = get_next_primary_name(txnPoolNodeSet, current_view_no + 1) @@ -19,7 +19,7 @@ def test_view_change_with_next_primary_stopped_and_one_node_lost_commit(looper, other_nodes = [n for n in txnPoolNodeSet if n.name != next_primary] with delay_rules_without_processing(delayed_node.nodeIbStasher, cDelay()): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 2) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 2) disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, next_primary) trigger_view_change(other_nodes) @@ -27,5 +27,5 @@ def test_view_change_with_next_primary_stopped_and_one_node_lost_commit(looper, ensureElectionsDone(looper, other_nodes, instances_list=range(2), customTimeout=15) ensure_all_nodes_have_same_data(looper, other_nodes) - sdk_ensure_pool_functional(looper, other_nodes, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, other_nodes, vdr_wallet_client, vdr_pool_handle) ensure_all_nodes_have_same_data(looper, other_nodes) diff --git a/plenum/test/view_change_service/test_delay_pre_prepares.py b/plenum/test/view_change_service/test_delay_pre_prepares.py index 8ec8208d70..71b83eeee2 100644 --- a/plenum/test/view_change_service/test_delay_pre_prepares.py +++ b/plenum/test/view_change_service/test_delay_pre_prepares.py @@ -15,11 +15,11 @@ def slow_node_is_next_primary(request): def test_delay_pre_prepare_for_next_primary(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, slow_node_is_next_primary, vc_counts): - check_view_change_one_slow_node(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, + check_view_change_one_slow_node(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, vc_counts=vc_counts, slow_node_is_next_primary=slow_node_is_next_primary, delay_commit=False, delay_pre_prepare=True) diff --git a/plenum/test/view_change_service/test_lag_by_checkpoint.py b/plenum/test/view_change_service/test_lag_by_checkpoint.py index a7712eb560..afef48d6cb 100644 --- a/plenum/test/view_change_service/test_lag_by_checkpoint.py +++ b/plenum/test/view_change_service/test_lag_by_checkpoint.py @@ -1,9 +1,9 @@ import pytest from plenum.test.delayers import cDelay -from plenum.test.helper import 
sdk_send_random_and_check, checkViewNoForNodes, assertExp, get_pp_seq_no +from plenum.test.helper import vdr_send_random_and_check, checkViewNoForNodes, assertExp, get_pp_seq_no from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.stasher import delay_rules_without_processing from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change_service.conftest import CHK_FREQ @@ -13,30 +13,30 @@ def test_lag_less_then_catchup(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): delayed_node = txnPoolNodeSet[-1] other_nodes = list(set(txnPoolNodeSet) - {delayed_node}) checkViewNoForNodes(txnPoolNodeSet) last_ordered_before = delayed_node.master_replica.last_ordered_3pc with delay_rules_without_processing(delayed_node.nodeIbStasher, cDelay()): # Send txns for stable checkpoint - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, CHK_FREQ) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, CHK_FREQ) # Check, that all of not slowed nodes has a stable checkpoint for n in other_nodes: assert n.master_replica._consensus_data.stable_checkpoint == CHK_FREQ # Send another txn. This txn will be reordered after view_change - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) trigger_view_change(txnPoolNodeSet) ensureElectionsDone(looper, txnPoolNodeSet) assert delayed_node.master_replica.last_ordered_3pc == last_ordered_before # Send txns for stabilize checkpoint on other nodes - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, CHK_FREQ - 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, CHK_FREQ - 1) pool_pp_seq_no = get_pp_seq_no(other_nodes) looper.run(eventually(lambda: assertExp(delayed_node.master_replica.last_ordered_3pc[1] == pool_pp_seq_no))) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/view_change_service/test_view_change_add_one_node_uncommitted.py b/plenum/test/view_change_service/test_view_change_add_one_node_uncommitted.py index 52dc4eb62f..096e36a677 100644 --- a/plenum/test/view_change_service/test_view_change_add_one_node_uncommitted.py +++ b/plenum/test/view_change_service/test_view_change_add_one_node_uncommitted.py @@ -2,8 +2,8 @@ from plenum.common.util import randomString from plenum.test.delayers import nv_delay, msg_rep_delay, ppgDelay from plenum.test.helper import waitForViewChange -from plenum.test.node_request.helper import sdk_ensure_pool_functional -from plenum.test.pool_transactions.helper import sdk_add_new_nym, sdk_add_new_node +from plenum.test.node_request.helper import vdr_ensure_pool_functional +from plenum.test.pool_transactions.helper import vdr_add_new_nym, vdr_add_new_node from plenum.test.stasher import delay_rules_without_processing from plenum.test.test_node import ensureElectionsDone, TestNode, getPrimaryReplica from plenum.test.view_change_service.helper import trigger_view_change @@ -29,9 +29,9 @@ def 
check_node_txn_propagated(nodes): def test_view_change_add_one_node_uncommitted_by_next_primary(looper, tdir, tconf, allPluginsPath, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_steward): # 1. Pre-requisites: viewNo=2, Primary is Node3 for viewNo in range(1, 3): trigger_view_change(txnPoolNodeSet) @@ -39,9 +39,9 @@ def test_view_change_add_one_node_uncommitted_by_next_primary(looper, tdir, tcon ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30) # 2. Add Steward for new Node - new_steward_wallet_handle = sdk_add_new_nym(looper, - sdk_pool_handle, - sdk_wallet_steward, + new_steward_wallet_handle = vdr_add_new_nym(looper, + vdr_pool_handle, + vdr_wallet_steward, alias="testClientSteward" + randomString(3), role=STEWARD_STRING) @@ -52,9 +52,9 @@ def test_view_change_add_one_node_uncommitted_by_next_primary(looper, tdir, tcon primary_node = getPrimaryReplica(txnPoolNodeSet).node next_primary = txnPoolNodeSet[-1] with delay_rules_without_processing(primary_node.nodeIbStasher, ppgDelay()): - sdk_add_new_node( + vdr_add_new_node( looper, - sdk_pool_handle, + vdr_pool_handle, new_steward_wallet_handle, new_node_name="Psi", tdir=tdir, @@ -90,4 +90,4 @@ def test_view_change_add_one_node_uncommitted_by_next_primary(looper, tdir, tcon trigger_view_change(txnPoolNodeSet) waitForViewChange(looper, txnPoolNodeSet, 4) ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=35) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/view_change_service/test_view_change_triggered.py b/plenum/test/view_change_service/test_view_change_triggered.py index 76e23ea639..390d638559 100644 --- a/plenum/test/view_change_service/test_view_change_triggered.py +++ b/plenum/test/view_change_service/test_view_change_triggered.py @@ -1,5 +1,5 @@ -from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check, assertExp -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.helper import checkViewNoForNodes, vdr_send_random_and_check, assertExp +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected @@ -10,28 +10,28 @@ REQ_COUNT = 10 -def test_view_change_triggered(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): +def test_view_change_triggered(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): current_view_no = checkViewNoForNodes(txnPoolNodeSet) trigger_view_change(txnPoolNodeSet) ensureElectionsDone(looper, txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) assert checkViewNoForNodes(txnPoolNodeSet) == current_view_no + 1 -def test_view_change_triggered_after_ordering(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, REQ_COUNT) +def test_view_change_triggered_after_ordering(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, REQ_COUNT) current_view_no = checkViewNoForNodes(txnPoolNodeSet) trigger_view_change(txnPoolNodeSet) ensureElectionsDone(looper, txnPoolNodeSet) - 
sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) assert checkViewNoForNodes(txnPoolNodeSet) == current_view_no + 1 -def test_view_change_with_next_primary_stopped(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): +def test_view_change_with_next_primary_stopped(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): current_view_no = checkViewNoForNodes(txnPoolNodeSet) next_primary = get_next_primary_name(txnPoolNodeSet, current_view_no + 1) disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, next_primary) @@ -40,5 +40,5 @@ def test_view_change_with_next_primary_stopped(looper, txnPoolNodeSet, sdk_pool_ trigger_view_change(remaining_nodes) ensureElectionsDone(looper, remaining_nodes, instances_list=range(2), customTimeout=15) - sdk_ensure_pool_functional(looper, remaining_nodes, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, remaining_nodes, vdr_wallet_client, vdr_pool_handle) assert checkViewNoForNodes(remaining_nodes) == current_view_no + 2 diff --git a/plenum/test/view_change_service/test_view_change_while_adding_new_node_1_slow_commit.py b/plenum/test/view_change_service/test_view_change_while_adding_new_node_1_slow_commit.py index b4d9a360c3..00fadfc0bf 100644 --- a/plenum/test/view_change_service/test_view_change_while_adding_new_node_1_slow_commit.py +++ b/plenum/test/view_change_service/test_view_change_while_adding_new_node_1_slow_commit.py @@ -3,14 +3,14 @@ def test_view_change_while_adding_new_node_1_slow_commit(looper, tdir, tconf, allPluginsPath, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_steward): check_view_change_adding_new_node(looper, tdir, tconf, allPluginsPath, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_steward, slow_nodes=[txnPoolNodeSet[1]], delay_pre_prepare=False, delay_commit=True) diff --git a/plenum/test/view_change_service/test_view_change_while_adding_new_node_1_slow_preprepare.py b/plenum/test/view_change_service/test_view_change_while_adding_new_node_1_slow_preprepare.py index 562c49ad85..dc7191b605 100644 --- a/plenum/test/view_change_service/test_view_change_while_adding_new_node_1_slow_preprepare.py +++ b/plenum/test/view_change_service/test_view_change_while_adding_new_node_1_slow_preprepare.py @@ -3,14 +3,14 @@ def test_view_change_while_adding_new_node_1_slow_preprepare(looper, tdir, tconf, allPluginsPath, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_steward): check_view_change_adding_new_node(looper, tdir, tconf, allPluginsPath, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_steward, slow_nodes=[txnPoolNodeSet[1]], delay_pre_prepare=True, delay_commit=False) diff --git a/plenum/test/view_change_service/test_view_change_while_adding_new_node_2_slow_commit.py b/plenum/test/view_change_service/test_view_change_while_adding_new_node_2_slow_commit.py index 45cbfcdb08..c9e19e1db1 100644 --- a/plenum/test/view_change_service/test_view_change_while_adding_new_node_2_slow_commit.py +++ b/plenum/test/view_change_service/test_view_change_while_adding_new_node_2_slow_commit.py @@ -3,14 +3,14 @@ def 
test_view_change_while_adding_new_node_2_slow_commit(looper, tdir, tconf, allPluginsPath, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_steward): check_view_change_adding_new_node(looper, tdir, tconf, allPluginsPath, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_client, + vdr_wallet_steward, slow_nodes=[txnPoolNodeSet[1], txnPoolNodeSet[2]], delay_pre_prepare=False, delay_commit=True, diff --git a/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_new_primary.py b/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_new_primary.py index 7140cb3bf8..7c199cc9ee 100644 --- a/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_new_primary.py +++ b/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_new_primary.py @@ -8,7 +8,7 @@ def slow_nodes(node_set): def test_view_change_in_between_3pc_2_of_4_nodes_with_new_primary( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): """ - Slow processing 3PC messages for 2 of 4 node (2>f) - Slow the the first and the last non-primary node @@ -18,11 +18,11 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_new_primary( """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - sdk_pool_handle, sdk_wallet_client) + vdr_pool_handle, vdr_wallet_client) def test_view_change_in_between_3pc_2_of_4_nodes_with_new_primary_long_delay( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): """ - Slow processing 3PC messages for 2 of 4 node (2>f) - Slow the the first and the last non-primary node @@ -32,5 +32,5 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_new_primary_long_delay( """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client, slow_delay=20) diff --git a/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_non_primary.py b/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_non_primary.py index 6420e89b72..b7d2e7d866 100644 --- a/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_non_primary.py +++ b/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_non_primary.py @@ -7,7 +7,7 @@ def slow_nodes(node_set): def test_view_change_in_between_3pc_2_of_4_nodes_with_non_primary( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): """ - Slow processing 3PC messages for 2 of 4 node (2>f). - Both nodes are non-primary for master neither in this nor the next view @@ -15,11 +15,11 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_non_primary( """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - sdk_pool_handle, sdk_wallet_client) + vdr_pool_handle, vdr_wallet_client) def test_view_change_in_between_3pc_2_of_4_nodes_with_non_primary_long_delay( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): """ - Slow processing 3PC messages for 2 of 4 node (2>f). 
- Both nodes are non-primary for master neither in this nor the next view @@ -27,5 +27,5 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_non_primary_long_delay( """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client, slow_delay=20) diff --git a/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_old_and_new_primary.py b/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_old_and_new_primary.py index b068d153b6..00f66e6c30 100644 --- a/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_old_and_new_primary.py +++ b/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_old_and_new_primary.py @@ -8,7 +8,7 @@ def slow_nodes(node_set): def test_view_change_in_between_3pc_2_of_4_nodes_with_old_and_new_primary( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): """ - Slow processing 3PC messages for 2 of 4 node (2>f) - Slow both current and next primaries @@ -16,11 +16,11 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_old_and_new_primary( """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - sdk_pool_handle, sdk_wallet_client) + vdr_pool_handle, vdr_wallet_client) def test_view_change_in_between_3pc_2_of_4_nodes_with_old_and_new_primary_long_delay( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): """ - Slow processing 3PC messages for 2 of 4 node (2>f) - Slow both current and next primaries @@ -28,5 +28,5 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_old_and_new_primary_long_d """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client, slow_delay=20) diff --git a/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_old_primary.py b/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_old_primary.py index 6c8d7c640a..c63d5091d2 100644 --- a/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_old_primary.py +++ b/plenum/test/view_change_slow_nodes/test_view_change_2_of_4_nodes_with_old_primary.py @@ -8,7 +8,7 @@ def slow_nodes(node_set): def test_view_change_in_between_3pc_2_of_4_nodes_with_old_primary( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): """ - Slow processing 3PC messages for 2 of 4 node (2>f) - Slow the current Primary node and the last non-primary node (it will not @@ -17,11 +17,11 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_old_primary( """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - sdk_pool_handle, sdk_wallet_client) + vdr_pool_handle, vdr_wallet_client) def test_view_change_in_between_3pc_2_of_4_nodes_with_old_primary_long_delay( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): """ - Slow processing 3PC messages for 2 of 4 node (2>f) - Slow the current Primary node and the last non-primary node (it will not @@ -30,5 +30,5 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_old_primary_long_delay( """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, 
vdr_wallet_client, slow_delay=20) diff --git a/plenum/test/view_change_slow_nodes/test_view_change_all_nodes.py b/plenum/test/view_change_slow_nodes/test_view_change_all_nodes.py index f32f3c9745..2e993ab8d3 100644 --- a/plenum/test/view_change_slow_nodes/test_view_change_all_nodes.py +++ b/plenum/test/view_change_slow_nodes/test_view_change_all_nodes.py @@ -2,23 +2,23 @@ def test_view_change_in_between_3pc_all_nodes(txnPoolNodeSet, looper, - sdk_pool_handle, sdk_wallet_client): + vdr_pool_handle, vdr_wallet_client): """ - Slow processing 3PC messages for all nodes - do view change """ view_change_in_between_3pc(looper, txnPoolNodeSet, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client) + vdr_pool_handle, + vdr_wallet_client) def test_view_change_in_between_3pc_all_nodes_long_delay( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client): """ - Slow processing 3PC messages for all nodes - do view change """ view_change_in_between_3pc(looper, txnPoolNodeSet, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client, slow_delay=20) diff --git a/plenum/test/view_change_slow_nodes/test_view_change_all_nodes_random_delay.py b/plenum/test/view_change_slow_nodes/test_view_change_all_nodes_random_delay.py index ef361d6fde..e7795d0d66 100644 --- a/plenum/test/view_change_slow_nodes/test_view_change_all_nodes_random_delay.py +++ b/plenum/test/view_change_slow_nodes/test_view_change_all_nodes_random_delay.py @@ -4,24 +4,24 @@ def test_view_change_in_between_3pc_all_nodes_random_delays( - txnPoolNodeSet, tconf, looper, sdk_pool_handle, sdk_wallet_client): + txnPoolNodeSet, tconf, looper, vdr_pool_handle, vdr_wallet_client): """ - Slow processing 3PC messages for all nodes randomly - do view change """ view_change_in_between_3pc_random_delays(looper, txnPoolNodeSet, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, tconf) + vdr_pool_handle, + vdr_wallet_client, tconf) def test_view_change_in_between_3pc_all_nodes_random_delays_long_delay( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client, tconf): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client, tconf): """ - Slow processing 3PC messages for all nodes randomly - do view change """ view_change_in_between_3pc_random_delays(looper, txnPoolNodeSet, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, tconf, + vdr_pool_handle, vdr_wallet_client, tconf, min_delay=5) diff --git a/plenum/test/view_change_slow_nodes/test_view_change_complex.py b/plenum/test/view_change_slow_nodes/test_view_change_complex.py index 54b991aeb5..a4ed64e8c6 100644 --- a/plenum/test/view_change_slow_nodes/test_view_change_complex.py +++ b/plenum/test/view_change_slow_nodes/test_view_change_complex.py @@ -5,7 +5,7 @@ def test_view_change_complex( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client, tconf): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client, tconf): """ - Complex scenario with multiple view changes """ @@ -35,8 +35,8 @@ def test_view_change_complex( looper, txnPoolNodeSet, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf, min_delay=0, max_delay=10) @@ -44,8 +44,8 @@ def test_view_change_complex( looper, txnPoolNodeSet, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf, min_delay=1, max_delay=5) @@ -53,7 +53,7 @@ def test_view_change_complex( looper, txnPoolNodeSet, txnPoolNodeSet, - sdk_pool_handle, - 
sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf, min_delay=5) diff --git a/plenum/test/view_change_slow_nodes/test_view_change_gc_all_nodes_random_delay.py b/plenum/test/view_change_slow_nodes/test_view_change_gc_all_nodes_random_delay.py index 93e4a564e0..0ac3bfa4c2 100644 --- a/plenum/test/view_change_slow_nodes/test_view_change_gc_all_nodes_random_delay.py +++ b/plenum/test/view_change_slow_nodes/test_view_change_gc_all_nodes_random_delay.py @@ -8,8 +8,8 @@ from plenum.test import waits from plenum.test.helper import checkViewNoForNodes, \ - check_last_ordered_3pc, sdk_send_random_request, sdk_get_replies, \ - sdk_send_random_and_check, get_pp_seq_no + check_last_ordered_3pc, vdr_send_random_request, vdr_get_replies, \ + vdr_send_random_and_check, get_pp_seq_no from plenum.test.delayers import delay_3pc_messages, \ reset_delays_and_process_delayeds from plenum.test.view_change.helper import ensure_view_change_complete, ensure_view_change @@ -28,7 +28,7 @@ def check_nodes_requests_size(nodes, size): def test_view_change_gc_in_between_3pc_all_nodes_delays( - looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): + looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client): """ Test that garbage collector compares the whole 3PC key (viewNo, ppSeqNo) and does not remove messages from node's queues that have higher @@ -45,10 +45,10 @@ def test_view_change_gc_in_between_3pc_all_nodes_delays( # for master instances only cause non-master ones have # specific logic of its management which we don't care in # the test, see Replica::_setup_for_non_master) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) batches_count = get_pp_seq_no(txnPoolNodeSet) last_ordered_3pc = (viewNo, batches_count) check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc) @@ -76,7 +76,7 @@ def test_view_change_gc_in_between_3pc_all_nodes_delays( delay_3pc_messages(txnPoolNodeSet, 1, delay=propagationTimeout * 2) - requests = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) + requests = vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_client) def checkPrePrepareSentAtLeastByPrimary(): for node in txnPoolNodeSet: @@ -111,7 +111,7 @@ def checkPrePrepareSentAtLeastByPrimary(): ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) - sdk_get_replies(looper, [requests]) + vdr_get_replies(looper, [requests]) batches_count += 1 checkViewNoForNodes(txnPoolNodeSet, viewNo) diff --git a/plenum/test/view_change_with_delays/helper.py b/plenum/test/view_change_with_delays/helper.py index 9b0b4583f4..1f3ad9914a 100644 --- a/plenum/test/view_change_with_delays/helper.py +++ b/plenum/test/view_change_with_delays/helper.py @@ -4,9 +4,9 @@ from plenum.server.node import Node from plenum.test import waits from plenum.test.delayers import icDelay, cDelay, pDelay, nv_delay -from plenum.test.helper import sdk_send_random_request, sdk_get_reply, waitForViewChange +from plenum.test.helper import vdr_send_random_request, vdr_get_reply, waitForViewChange from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from 
plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected from plenum.test.stasher import delay_rules, delay_rules_without_processing from plenum.test.test_node import getRequiredInstances, ensureElectionsDone, checkNodesConnected @@ -111,7 +111,7 @@ def do_view_change_with_pending_request_and_one_fast_node(fast_node, with delay_rules(slow_stashers, cDelay()): with delay_rules(fast_stasher, cDelay()): # Send request - request = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) + request = vdr_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) # Wait until this request is prepared on N-f nodes looper.run(eventually(check_last_prepared_certificate_on_quorum, nodes, (lpc[0], lpc[1] + 1))) @@ -124,7 +124,7 @@ def do_view_change_with_pending_request_and_one_fast_node(fast_node, looper.run(eventually(check_view_change_done, nodes, view_no + 1, timeout=60)) # Finish request gracefully - sdk_get_reply(looper, request) + vdr_get_reply(looper, request) def do_view_change_with_unaligned_prepare_certificates( @@ -142,7 +142,7 @@ def do_view_change_with_unaligned_prepare_certificates( with delay_rules(slow_stashers, pDelay()): with delay_rules(all_stashers, cDelay()): # Send request - request = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) + request = vdr_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) # Wait until this request is prepared on fast nodes looper.run(eventually(check_last_prepared_certificate, fast_nodes, (0, 1))) @@ -157,10 +157,10 @@ def do_view_change_with_unaligned_prepare_certificates( looper.run(eventually(check_view_change_done, nodes, 1, timeout=60)) # Finish request gracefully - sdk_get_reply(looper, request) + vdr_get_reply(looper, request) ensure_all_nodes_have_same_data(looper, nodes) - sdk_ensure_pool_functional(looper, nodes, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, nodes, sdk_wallet_client, sdk_pool_handle) def do_view_change_with_delay_on_one_node(slow_node, nodes, looper, @@ -180,7 +180,7 @@ def do_view_change_with_delay_on_one_node(slow_node, nodes, looper, with delay_rules(slow_stasher, icDelay()): with delay_rules(stashers, cDelay()): # Send request - request = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) + request = vdr_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) # Wait until this request is prepared on N-f nodes looper.run(eventually(check_last_prepared_certificate_on_quorum, nodes, (lpc[0], lpc[1] + 1))) @@ -215,7 +215,7 @@ def do_view_change_with_delay_on_one_node(slow_node, nodes, looper, timeout=waits.expectedPoolElectionTimeout(len(nodes))) # Finish request gracefully - sdk_get_reply(looper, request) + vdr_get_reply(looper, request) def do_view_change_with_propagate_primary_on_one_delayed_node( @@ -236,7 +236,7 @@ def do_view_change_with_propagate_primary_on_one_delayed_node( with delay_rules(slow_stasher, nv_delay()): with delay_rules(stashers, cDelay()): # Send request - request = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) + request = vdr_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) # Wait until this request is prepared on N-f nodes looper.run(eventually(check_last_prepared_certificate_on_quorum, nodes, (lpc[0], lpc[1] + 1))) @@ -272,7 +272,7 @@ def do_view_change_with_propagate_primary_on_one_delayed_node( # started propagate primary to the same view. 
# Finish request gracefully - sdk_get_reply(looper, request) + vdr_get_reply(looper, request) def do_view_change_with_delayed_commits_and_node_restarts(fast_nodes, slow_nodes, nodes_to_restart, @@ -299,7 +299,7 @@ def do_view_change_with_delayed_commits_and_node_restarts(fast_nodes, slow_nodes # Delay commits on `slow_nodes` with delay_rules_without_processing(slow_stashers, cDelay()): - request = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) + request = vdr_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) # Check that all of the nodes except the slows one ordered the request looper.run(eventually(check_last_ordered, fast_nodes, (old_view_no, old_last_ordered[1] + 1))) @@ -340,5 +340,5 @@ def do_view_change_with_delayed_commits_and_node_restarts(fast_nodes, slow_nodes ) ensureElectionsDone(looper=looper, nodes=nodes) ensure_all_nodes_have_same_data(looper, nodes) - sdk_get_reply(looper, request) - sdk_ensure_pool_functional(looper, nodes, sdk_wallet_client, sdk_pool_handle) + vdr_get_reply(looper, request) + vdr_ensure_pool_functional(looper, nodes, sdk_wallet_client, sdk_pool_handle) diff --git a/plenum/test/view_change_with_delays/test_two_view_changes_with_delay_on_one_node.py b/plenum/test/view_change_with_delays/test_two_view_changes_with_delay_on_one_node.py index 6b6d7ed676..7b8055165b 100644 --- a/plenum/test/view_change_with_delays/test_two_view_changes_with_delay_on_one_node.py +++ b/plenum/test/view_change_with_delays/test_two_view_changes_with_delay_on_one_node.py @@ -1,7 +1,7 @@ import pytest from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.helper import sdk_send_random_and_check, perf_monitor_disabled +from plenum.test.helper import vdr_send_random_and_check, perf_monitor_disabled from plenum.test.view_change_with_delays.helper import \ do_view_change_with_delay_on_one_node @@ -20,7 +20,7 @@ def tconf(tconf): def test_two_view_changes_with_delay_on_one_node( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client, tconf): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client, tconf): """ Perform two view changes in such a way that a view change is performed on one slow node later than on the other nodes so that delayed Commits are @@ -29,10 +29,10 @@ def test_two_view_changes_with_delay_on_one_node( that verify that a new request can be ordered. 
""" do_view_change_with_delay_on_one_node(txnPoolNodeSet[-1], txnPoolNodeSet, looper, - sdk_pool_handle, sdk_wallet_client) + vdr_pool_handle, vdr_wallet_client) do_view_change_with_delay_on_one_node(txnPoolNodeSet[0], txnPoolNodeSet, looper, - sdk_pool_handle, sdk_wallet_client) + vdr_pool_handle, vdr_wallet_client) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/view_change_with_delays/test_two_view_changes_with_delayed_commits.py b/plenum/test/view_change_with_delays/test_two_view_changes_with_delayed_commits.py index e92a0ad4e5..12c948eaef 100644 --- a/plenum/test/view_change_with_delays/test_two_view_changes_with_delayed_commits.py +++ b/plenum/test/view_change_with_delays/test_two_view_changes_with_delayed_commits.py @@ -1,6 +1,6 @@ import pytest -from plenum.test.helper import sdk_send_random_and_check, perf_monitor_disabled +from plenum.test.helper import vdr_send_random_and_check, perf_monitor_disabled from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.view_change_with_delays.helper import do_view_change_with_pending_request_and_one_fast_node @@ -19,20 +19,20 @@ def tconf(tconf): def test_two_view_changes_with_delayed_commits(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf): # Perform view change with Delta acting as fast node # With current view change implementation its state will become different from other nodes do_view_change_with_pending_request_and_one_fast_node(txnPoolNodeSet[3], txnPoolNodeSet, - looper, sdk_pool_handle, sdk_wallet_client) + looper, vdr_pool_handle, vdr_wallet_client) # Perform view change with Alpha acting as fast node # With current view change implementation its state will become different from other nodes, # resulting in pool losing consensus and failing to finish view change at all do_view_change_with_pending_request_and_one_fast_node(txnPoolNodeSet[0], txnPoolNodeSet, - looper, sdk_pool_handle, sdk_wallet_client) + looper, vdr_pool_handle, vdr_wallet_client) # Check that pool can write transactions - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/view_change_with_delays/test_two_view_changes_with_propagate_primary_on_one_delayed_node.py b/plenum/test/view_change_with_delays/test_two_view_changes_with_propagate_primary_on_one_delayed_node.py index 150c3f8bab..c0ca54966b 100644 --- a/plenum/test/view_change_with_delays/test_two_view_changes_with_propagate_primary_on_one_delayed_node.py +++ b/plenum/test/view_change_with_delays/test_two_view_changes_with_propagate_primary_on_one_delayed_node.py @@ -1,6 +1,6 @@ import pytest -from plenum.test.helper import sdk_send_random_and_check, perf_monitor_disabled +from plenum.test.helper import vdr_send_random_and_check, perf_monitor_disabled from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.view_change_with_delays.helper import \ do_view_change_with_propagate_primary_on_one_delayed_node @@ -20,7 +20,7 @@ def tconf(tconf): def test_two_view_changes_with_propagate_primary_on_one_delayed_node( - txnPoolNodeSet, looper, sdk_pool_handle, 
sdk_wallet_client, tconf): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client, tconf): """ Perform two view changes in such a way that a view change is performed on all the nodes except for one slow node and then propagate primary is @@ -30,10 +30,10 @@ def test_two_view_changes_with_propagate_primary_on_one_delayed_node( be ordered. """ do_view_change_with_propagate_primary_on_one_delayed_node( - txnPoolNodeSet[-1], txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client) + txnPoolNodeSet[-1], txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client) do_view_change_with_propagate_primary_on_one_delayed_node( - txnPoolNodeSet[0], txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client) + txnPoolNodeSet[0], txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/view_change_with_delays/test_view_change_during_unstash.py b/plenum/test/view_change_with_delays/test_view_change_during_unstash.py index 6e92eb1d34..81744a9d2e 100644 --- a/plenum/test/view_change_with_delays/test_view_change_during_unstash.py +++ b/plenum/test/view_change_with_delays/test_view_change_during_unstash.py @@ -6,10 +6,10 @@ from plenum.common.util import compare_3PC_keys from plenum.server.catchup.node_leecher_service import NodeLeecherService from plenum.test.delayers import icDelay, cr_delay, delay_3pc -from plenum.test.helper import max_3pc_batch_limits, sdk_send_random_and_check, \ - sdk_send_random_requests, sdk_get_replies, sdk_check_reply +from plenum.test.helper import max_3pc_batch_limits, vdr_send_random_and_check, \ + vdr_send_random_requests, vdr_get_replies, vdr_check_reply from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.stasher import start_delaying, stop_delaying_and_process, delay_rules from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change_service.helper import trigger_view_change @@ -35,7 +35,7 @@ def check_catchup_is_finished(node): assert node.ledgerManager._node_leecher._state == NodeLeecherService.State.Idle -def test_view_change_during_unstash(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf): +def test_view_change_during_unstash(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, tconf): slow_node = txnPoolNodeSet[-1] other_nodes = txnPoolNodeSet[:-1] @@ -44,7 +44,7 @@ def test_view_change_during_unstash(looper, txnPoolNodeSet, sdk_pool_handle, sdk all_stashers = [n.nodeIbStasher for n in txnPoolNodeSet] # Preload nodes with some transactions - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, vdr_wallet_client, 1) for node in txnPoolNodeSet: assert node.master_replica.last_ordered_3pc == (0, 1) @@ -54,7 +54,7 @@ def test_view_change_during_unstash(looper, txnPoolNodeSet, sdk_pool_handle, sdk # Stop ordering on slow node and send requests slow_node_after_5 = start_delaying(slow_stasher, delay_3pc(view_no=0, after=5, msgs=Commit)) slow_node_until_5 = start_delaying(slow_stasher, delay_3pc(view_no=0, after=0)) - reqs_view_0 = sdk_send_random_requests(looper, sdk_pool_handle, 
sdk_wallet_client, 8) + reqs_view_0 = vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 8) # Make pool order first 2 batches and pause pool_after_3 = start_delaying(other_stashers, delay_3pc(view_no=0, after=3)) @@ -79,11 +79,11 @@ def test_view_change_during_unstash(looper, txnPoolNodeSet, sdk_pool_handle, sdk stop_delaying_and_process(slow_node_after_5) # Ensure that expected number of requests was ordered - replies = sdk_get_replies(looper, reqs_view_0) + replies = vdr_get_replies(looper, reqs_view_0) for rep in replies[:6]: - sdk_check_reply(rep) + vdr_check_reply(rep) # Ensure that everything is ok ensureElectionsDone(looper, txnPoolNodeSet) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/view_change_with_delays/test_view_change_with_advancing_node.py b/plenum/test/view_change_with_delays/test_view_change_with_advancing_node.py index 229dc16398..ecdb5ce080 100644 --- a/plenum/test/view_change_with_delays/test_view_change_with_advancing_node.py +++ b/plenum/test/view_change_with_delays/test_view_change_with_advancing_node.py @@ -3,10 +3,10 @@ from plenum.server.node import Node from plenum.test.delayers import cDelay -from plenum.test.helper import sdk_send_random_and_check, \ - sdk_send_random_requests, sdk_get_replies, perf_monitor_disabled +from plenum.test.helper import vdr_send_random_and_check, \ + vdr_send_random_requests, vdr_get_replies, perf_monitor_disabled from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.stasher import delay_rules from plenum.test.view_change_service.helper import trigger_view_change from stp_core.loop.eventually import eventually @@ -19,8 +19,8 @@ def tconf(tconf): def test_delay_commits(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf): """ #3 @@ -41,8 +41,8 @@ def test_delay_commits(txnPoolNodeSet, looper, Expected result with current view change: node X can't finish second transaction """ - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) nodes_stashers = [n.nodeIbStasher for n in txnPoolNodeSet if n != txnPoolNodeSet[-1]] for _ in range(2): @@ -50,8 +50,8 @@ def test_delay_commits(txnPoolNodeSet, looper, nodes_stashers, txnPoolNodeSet[-1], looper, - sdk_pool_handle, - sdk_wallet_client) + vdr_pool_handle, + vdr_wallet_client) def do_view_change_with_delayed_commits_on_all_but_one(nodes, nodes_without_one_stashers, @@ -64,7 +64,7 @@ def do_view_change_with_delayed_commits_on_all_but_one(nodes, nodes_without_one_ # delay commits for all nodes except node X with delay_rules(nodes_without_one_stashers, cDelay(sys.maxsize)): # send one request - requests2 = sdk_send_random_requests(looper, sdk_pool_handle, + requests2 = vdr_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) def last_ordered(node: Node, last_ordered): @@ -81,9 +81,9 @@ def last_ordered(node: Node, last_ordered): # wait for view change done on all nodes looper.run(eventually(view_change_done, nodes, new_view_no)) - sdk_get_replies(looper, requests2) + 
vdr_get_replies(looper, requests2) ensure_all_nodes_have_same_data(looper, nodes) - sdk_ensure_pool_functional(looper, nodes, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, nodes, sdk_wallet_client, sdk_pool_handle) def last_prepared_certificate(nodes, num): diff --git a/plenum/test/view_change_with_delays/test_view_change_with_delay_on_one_node.py b/plenum/test/view_change_with_delays/test_view_change_with_delay_on_one_node.py index 406ac77ff9..d294ebd99a 100644 --- a/plenum/test/view_change_with_delays/test_view_change_with_delay_on_one_node.py +++ b/plenum/test/view_change_with_delays/test_view_change_with_delay_on_one_node.py @@ -20,7 +20,7 @@ def tconf(tconf): def test_view_change_with_delay_on_one_node( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client, tconf): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client, tconf): """ Perform view change on one slow node later than on the other nodes so that delayed Commits are processed by the slow node in the old view and by the @@ -28,6 +28,6 @@ def test_view_change_with_delay_on_one_node( same ledgers and state. """ do_view_change_with_delay_on_one_node(txnPoolNodeSet[-1], txnPoolNodeSet, looper, - sdk_pool_handle, sdk_wallet_client) + vdr_pool_handle, vdr_wallet_client) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits.py b/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits.py index 9ed66bdd65..9305e4429b 100644 --- a/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits.py +++ b/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits.py @@ -2,7 +2,7 @@ from plenum.test.helper import perf_monitor_disabled from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.view_change_with_delays.helper import do_view_change_with_pending_request_and_one_fast_node # This is needed only with current view change implementation to give enough time @@ -20,13 +20,13 @@ def tconf(tconf): def test_view_change_with_delayed_commits(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tconf): # Perform view change with Delta acting as fast node # With current view change implementation its state will become different from other nodes do_view_change_with_pending_request_and_one_fast_node(txnPoolNodeSet[3], txnPoolNodeSet, - looper, sdk_pool_handle, sdk_wallet_client) + looper, vdr_pool_handle, vdr_wallet_client) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_all_but_one_node_and_restart_of_those_nodes.py b/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_all_but_one_node_and_restart_of_those_nodes.py index c378b6298c..fa92b1a7fc 100644 --- a/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_all_but_one_node_and_restart_of_those_nodes.py +++ b/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_all_but_one_node_and_restart_of_those_nodes.py @@ -15,8 +15,8 @@ def tconf(tconf): 
@pytest.mark.skip(reason="Should be fixed by: INDY-2238 (Persist 3PC messages during Ordering)") def test_view_change_with_delayed_commits_on_all_but_one_node_and_restart_of_those_nodes(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_client, tconf, tdir, + vdr_pool_handle, + vdr_wallet_client, tconf, tdir, allPluginsPath): """ Order transactions on only one node @@ -35,8 +35,8 @@ def test_view_change_with_delayed_commits_on_all_but_one_node_and_restart_of_tho old_view_no=slow_nodes[0].viewNo, old_last_ordered=slow_nodes[0].master_replica.last_ordered_3pc, looper=looper, - sdk_pool_handle=sdk_pool_handle, - sdk_wallet_client=sdk_wallet_client, + sdk_pool_handle=vdr_pool_handle, + sdk_wallet_client=vdr_wallet_client, tconf=tconf, tdir=tdir, all_plugins_path=allPluginsPath, diff --git a/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_half_of_the_nodes_and_restart_of_that_half.py b/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_half_of_the_nodes_and_restart_of_that_half.py index 1cb84d6be1..1d012a4be2 100644 --- a/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_half_of_the_nodes_and_restart_of_that_half.py +++ b/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_half_of_the_nodes_and_restart_of_that_half.py @@ -14,8 +14,8 @@ def tconf(tconf): def test_view_change_with_delayed_commits_on_half_of_the_nodes_and_restart_of_that_half(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_client, tconf, tdir, + vdr_pool_handle, + vdr_wallet_client, tconf, tdir, allPluginsPath): """ Order transactions on half of the pool @@ -34,8 +34,8 @@ def test_view_change_with_delayed_commits_on_half_of_the_nodes_and_restart_of_th old_view_no=slow_nodes[0].viewNo, old_last_ordered=slow_nodes[0].master_replica.last_ordered_3pc, looper=looper, - sdk_pool_handle=sdk_pool_handle, - sdk_wallet_client=sdk_wallet_client, + sdk_pool_handle=vdr_pool_handle, + sdk_wallet_client=vdr_wallet_client, tconf=tconf, tdir=tdir, all_plugins_path=allPluginsPath, diff --git a/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_half_of_the_nodes_and_restart_of_the_other_half.py b/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_half_of_the_nodes_and_restart_of_the_other_half.py index dc38c5ab6a..420e1c5f8c 100644 --- a/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_half_of_the_nodes_and_restart_of_the_other_half.py +++ b/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_half_of_the_nodes_and_restart_of_the_other_half.py @@ -14,8 +14,8 @@ def tconf(tconf): def test_view_change_with_delayed_commits_on_half_of_the_nodes_and_restart_of_the_other_half(txnPoolNodeSet, looper, - sdk_pool_handle, - sdk_wallet_client, tconf, + vdr_pool_handle, + vdr_wallet_client, tconf, tdir, allPluginsPath): """ Order transactions on half of the pool @@ -34,8 +34,8 @@ def test_view_change_with_delayed_commits_on_half_of_the_nodes_and_restart_of_th old_view_no=slow_nodes[0].viewNo, old_last_ordered=slow_nodes[0].master_replica.last_ordered_3pc, looper=looper, - sdk_pool_handle=sdk_pool_handle, - sdk_wallet_client=sdk_wallet_client, + sdk_pool_handle=vdr_pool_handle, + sdk_wallet_client=vdr_wallet_client, tconf=tconf, tdir=tdir, all_plugins_path=allPluginsPath, diff --git a/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_one_node_and_restart_of_other_nodes.py 
b/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_one_node_and_restart_of_other_nodes.py index afe41c06dd..a3c8c56024 100644 --- a/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_one_node_and_restart_of_other_nodes.py +++ b/plenum/test/view_change_with_delays/test_view_change_with_delayed_commits_on_one_node_and_restart_of_other_nodes.py @@ -14,7 +14,7 @@ def tconf(tconf): def test_view_change_with_delayed_commits_on_one_node_and_restart_of_other_nodes(txnPoolNodeSet, looper, - sdk_pool_handle, sdk_wallet_client, + vdr_pool_handle, vdr_wallet_client, tconf, tdir, allPluginsPath): """ Order transactions on all but one node by delaying commits on it @@ -33,8 +33,8 @@ def test_view_change_with_delayed_commits_on_one_node_and_restart_of_other_nodes old_view_no=slow_nodes[0].viewNo, old_last_ordered=slow_nodes[0].master_replica.last_ordered_3pc, looper=looper, - sdk_pool_handle=sdk_pool_handle, - sdk_wallet_client=sdk_wallet_client, + sdk_pool_handle=vdr_pool_handle, + sdk_wallet_client=vdr_wallet_client, tconf=tconf, tdir=tdir, all_plugins_path=allPluginsPath, diff --git a/plenum/test/view_change_with_delays/test_view_change_with_different_prepare_certificate.py b/plenum/test/view_change_with_delays/test_view_change_with_different_prepare_certificate.py index 1d8a950fef..998a3d4d35 100644 --- a/plenum/test/view_change_with_delays/test_view_change_with_different_prepare_certificate.py +++ b/plenum/test/view_change_with_delays/test_view_change_with_different_prepare_certificate.py @@ -2,10 +2,10 @@ from plenum.common.constants import PREPREPARE from plenum.test.delayers import ppDelay, msg_rep_delay -from plenum.test.helper import sdk_send_random_and_check, \ - sdk_send_random_request +from plenum.test.helper import vdr_send_random_and_check, \ + vdr_send_random_request from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.node_request.helper import sdk_ensure_pool_functional +from plenum.test.node_request.helper import vdr_ensure_pool_functional from plenum.test.stasher import delay_rules from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change.helper import check_prepare_certificate @@ -14,14 +14,14 @@ def test_view_change_with_different_prepare_certificate(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): """ Check that a node without pre-prepare but with quorum of prepares wouldn't use this transaction as a last in prepare certificate """ - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) slow_node = txnPoolNodeSet[-1] # delay preprepares and message response with preprepares. 
with delay_rules(slow_node.nodeIbStasher, ppDelay(delay=sys.maxsize)): @@ -29,7 +29,7 @@ def test_view_change_with_different_prepare_certificate(looper, txnPoolNodeSet, msg_rep_delay(delay=sys.maxsize, types_to_delay=[PREPREPARE, ])): last_ordered = slow_node.master_replica.last_ordered_3pc - sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) + vdr_send_random_request(looper, vdr_pool_handle, vdr_wallet_client) looper.run(eventually(check_prepare_certificate, txnPoolNodeSet[0:-1], last_ordered[1] + 1)) @@ -40,4 +40,4 @@ def test_view_change_with_different_prepare_certificate(looper, txnPoolNodeSet, ensureElectionsDone(looper, txnPoolNodeSet) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) - sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle) + vdr_ensure_pool_functional(looper, txnPoolNodeSet, vdr_wallet_client, vdr_pool_handle) diff --git a/plenum/test/view_change_with_delays/test_view_change_with_propagate_primary_on_one_delayed_node.py b/plenum/test/view_change_with_delays/test_view_change_with_propagate_primary_on_one_delayed_node.py index c98f43f372..32f5718336 100644 --- a/plenum/test/view_change_with_delays/test_view_change_with_propagate_primary_on_one_delayed_node.py +++ b/plenum/test/view_change_with_delays/test_view_change_with_propagate_primary_on_one_delayed_node.py @@ -20,7 +20,7 @@ def tconf(tconf): def test_view_change_with_propagate_primary_on_one_delayed_node( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client, tconf): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client, tconf): """ Perform view change on all the nodes except for one slow node and then propagate primary on it so that delayed Commits are processed by the slow @@ -28,6 +28,6 @@ def test_view_change_with_propagate_primary_on_one_delayed_node( verify that all the nodes have the same ledgers and state. """ do_view_change_with_propagate_primary_on_one_delayed_node( - txnPoolNodeSet[-1], txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client) + txnPoolNodeSet[-1], txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/view_change_with_delays/test_view_change_with_unaligned_prepare_certificates_on_half_nodes.py b/plenum/test/view_change_with_delays/test_view_change_with_unaligned_prepare_certificates_on_half_nodes.py index 86780e0ebd..b776fd2a71 100644 --- a/plenum/test/view_change_with_delays/test_view_change_with_unaligned_prepare_certificates_on_half_nodes.py +++ b/plenum/test/view_change_with_delays/test_view_change_with_unaligned_prepare_certificates_on_half_nodes.py @@ -17,12 +17,12 @@ def tconf(tconf): def test_view_change_with_unaligned_prepare_certificates_on_half_nodes( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client, tconf): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client, tconf): """ Perform view change with half nodes reaching lower last prepared certificate than others. With current implementation of view change this can result with view change taking a lot of time. 
""" do_view_change_with_unaligned_prepare_certificates(txnPoolNodeSet[2:], - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client) + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/view_change_with_delays/test_view_change_with_unaligned_prepare_certificates_on_one_node.py b/plenum/test/view_change_with_delays/test_view_change_with_unaligned_prepare_certificates_on_one_node.py index 84ba05ba8d..b5c7475bfb 100644 --- a/plenum/test/view_change_with_delays/test_view_change_with_unaligned_prepare_certificates_on_one_node.py +++ b/plenum/test/view_change_with_delays/test_view_change_with_unaligned_prepare_certificates_on_one_node.py @@ -17,11 +17,11 @@ def tconf(tconf): def test_view_change_with_unaligned_prepare_certificates_on_one_node( - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client, tconf): + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client, tconf): """ Perform view change with only one node reaching lower last prepared certificate than others. """ do_view_change_with_unaligned_prepare_certificates(txnPoolNodeSet[3:], - txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client) + txnPoolNodeSet, looper, vdr_pool_handle, vdr_wallet_client) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/wallet_helper.py b/plenum/test/wallet_helper.py new file mode 100644 index 0000000000..88b6ba09a7 --- /dev/null +++ b/plenum/test/wallet_helper.py @@ -0,0 +1,116 @@ +import base58 +import json +import os + +from indy_vdr import ledger, open_pool, set_protocol_version +from indy_vdr.bindings import do_call_async +from aries_askar import Store, Key, KeyAlg, AskarError, AskarErrorCode +from indy_credx import Schema, CredentialDefinition, RevocationRegistryDefinition +from indy_vdr.error import VdrError + +# TODO: This code is copied from indy-test-automation, we should move it to a common place +# and use it from there in both places + +MODULE_PATH = os.path.abspath(os.path.dirname(__file__)) +POOL_GENESIS_PATH = os.path.join(MODULE_PATH, 'docker_genesis') + + +def key_helper(seed=None): + """ + Generate a new keypair and DID + """ + alg = KeyAlg.ED25519 + if seed: + keypair = Key.from_secret_bytes(alg, seed) + else: + keypair = Key.generate(alg) + verkey_bytes = keypair.get_public_bytes() + verkey = base58.b58encode(verkey_bytes).decode("ascii") + did = base58.b58encode(verkey_bytes[:16]).decode("ascii") + return keypair, did, verkey + + +async def key_insert_helper(wallet_handle, keypair, did, verkey): + ''' + Insert a keypair into the wallet + ''' + try: + await wallet_handle.insert_key(verkey, keypair, metadata=json.dumps({})) + except AskarError as err: + if err.code == AskarErrorCode.DUPLICATE: + pass + else: + raise err + item = await wallet_handle.fetch("did", did, for_update=True) + if item: + did_info = item.value_json + if did_info.get("verkey") != verkey: + raise Exception("DID already present in wallet") + did_info["metadata"] = {} + await wallet_handle.replace("did", did, value_json=did_info, tags=item.tags) + else: + await wallet_handle.insert( + "did", + did, + value_json={ + "did": did, + "method": "sov", + "verkey": verkey, + "verkey_type": "ed25519", + "metadata": {}, + }, + tags={ + "method": "sov", + "verkey": verkey, + "verkey_type": "ed25519", + }, + ) + + +async def vdr_create_and_store_did(wallet_handle, seed=None): + ''' + Create a new DID and store it in the wallet + ''' + keypair, did, verkey = key_helper(seed=seed) + await 
key_insert_helper(wallet_handle, keypair, did, verkey) + return did, verkey + +async def vdr_wallet_helper(wallet_key='', wallet_key_derivation_method='kdf:argon2i:mod'): + wuri = "sqlite://:memory:" + wallet_h = await Store.provision(wuri, wallet_key_derivation_method, wallet_key, recreate=False) + session_handle = await wallet_h.session() + wallet_config = json.dumps({"id": wuri}) + wallet_credentials = json.dumps({"key": wallet_key, "key_derivation_method": wallet_key_derivation_method}) + + return session_handle, wallet_config, wallet_credentials + +async def vdr_pool_helper(path_to_genesis=POOL_GENESIS_PATH): + set_protocol_version(2) + pool_handle = await open_pool(transactions_path=path_to_genesis) + return pool_handle, "default_pool_name" + +async def vdr_get_did_signing_key(wallet_handle, did): + item = await wallet_handle.fetch("did", did, for_update=False) + if item: + kp = await wallet_handle.fetch_key(item.value_json.get("verkey")) + return kp.key + return None + +async def vdr_sign_request(wallet_handle, submitter_did, req): + key = await vdr_get_did_signing_key(wallet_handle, submitter_did) + if not key: + raise Exception(f"Key for DID {submitter_did} is empty") + req.set_signature(key.sign_message(req.signature_input)) + return req + +async def vdr_sign_and_submit_request(pool_handle, wallet_handle, submitter_did, req): + sreq = await vdr_sign_request(wallet_handle, submitter_did, req) + request_result = await pool_handle.submit_request(sreq) + return request_result + +async def vdr_multi_sign_request(wallet_handle, submitter_did, req): + key = await vdr_get_did_signing_key(wallet_handle, submitter_did) + if not key: + raise Exception(f"Key for DID {submitter_did} is empty") + req.set_multi_signature(submitter_did, key.sign_message(req.signature_input)) + return req \ No newline at end of file diff --git a/plenum/test/watermarks/test_watermarks_after_view_change.py b/plenum/test/watermarks/test_watermarks_after_view_change.py index e1c0f7eef2..d84dfd3984 100644 --- a/plenum/test/watermarks/test_watermarks_after_view_change.py +++ b/plenum/test/watermarks/test_watermarks_after_view_change.py @@ -2,7 +2,7 @@ from plenum.test import waits from plenum.test.delayers import cDelay, chk_delay, icDelay, nv_delay -from plenum.test.helper import sdk_send_random_and_check, waitForViewChange +from plenum.test.helper import vdr_send_random_and_check, waitForViewChange from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data from plenum.test.stasher import delay_rules from plenum.test.view_change_service.helper import trigger_view_change @@ -31,8 +31,8 @@ def tconf(tconf): def test_watermarks_after_view_change(tdir, tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client): + vdr_pool_handle, + vdr_wallet_client): """ Delay commit, checkpoint, InstanceChange and ViewChangeDone messages for lagging_node. Start ViewChange. 
@@ -50,9 +50,9 @@ def test_watermarks_after_view_change(tdir, tconf, expectedViewNo=start_view_no + 1, customTimeout=waits.expectedPoolViewChangeStartedTimeout(len(txnPoolNodeSet))) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet[:-1]) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 6) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 6) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) ensure_all_nodes_have_same_data(looper, txnPoolNodeSet) diff --git a/plenum/test/zstack_tests/test_clientstack_restart_trigger.py b/plenum/test/zstack_tests/test_clientstack_restart_trigger.py index 90f34357f8..dcfd4c04e4 100644 --- a/plenum/test/zstack_tests/test_clientstack_restart_trigger.py +++ b/plenum/test/zstack_tests/test_clientstack_restart_trigger.py @@ -40,8 +40,8 @@ def patch_stack_restart(node): def revert_origin_back(node, orig_restart): node.clientstack.restart = types.MethodType(orig_restart, node.clientstack) -def test_clientstack_restart_not_triggered(tconf, create_node_and_not_start): - node = create_node_and_not_start +def test_clientstack_restart_not_triggered(tconf, vdr_create_node_and_not_start): + node = vdr_create_node_and_not_start global is_restarted is_restarted = False @@ -56,8 +56,8 @@ def test_clientstack_restart_not_triggered(tconf, create_node_and_not_start): revert_origin_back(node, orig_restart) -def test_clientstack_restart_triggered(tconf, create_node_and_not_start): - node = create_node_and_not_start +def test_clientstack_restart_triggered(tconf, vdr_create_node_and_not_start): + node = vdr_create_node_and_not_start global is_restarted is_restarted = False @@ -78,8 +78,8 @@ def test_clientstack_restart_triggered(tconf, create_node_and_not_start): revert_origin_back(node, orig_restart) -def test_clientstack_restart_trigger_delayed(tconf, looper, create_node_and_not_start): - node = create_node_and_not_start +def test_clientstack_restart_trigger_delayed(tconf, looper, vdr_create_node_and_not_start): + node = vdr_create_node_and_not_start global is_restarted is_restarted = False diff --git a/plenum/test/zstack_tests/test_restart_clientstack_before_reply_on_2_of_4_nodes.py b/plenum/test/zstack_tests/test_restart_clientstack_before_reply_on_2_of_4_nodes.py index 7e0b6cc198..01b7fc81ec 100644 --- a/plenum/test/zstack_tests/test_restart_clientstack_before_reply_on_2_of_4_nodes.py +++ b/plenum/test/zstack_tests/test_restart_clientstack_before_reply_on_2_of_4_nodes.py @@ -1,13 +1,13 @@ import types -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.test_node import TestNode def test_restart_clientstack_before_reply_on_2_of_4_nodes(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_steward): orig_send_reply = TestNode.sendReplyToClient def send_after_restart(self, reply, reqKey): self.restart_clientstack() @@ -23,9 +23,9 @@ def revert_origin_back(): node) patch_sendReplyToClient() - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, 1) revert_origin_back() \ No newline at end of file diff --git 
a/plenum/test/zstack_tests/test_restart_clientstack_before_reply_on_3_of_4_nodes.py b/plenum/test/zstack_tests/test_restart_clientstack_before_reply_on_3_of_4_nodes.py index 9a5e596066..717dac604c 100644 --- a/plenum/test/zstack_tests/test_restart_clientstack_before_reply_on_3_of_4_nodes.py +++ b/plenum/test/zstack_tests/test_restart_clientstack_before_reply_on_3_of_4_nodes.py @@ -1,13 +1,13 @@ import types -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.test_node import TestNode def test_restart_clientstack_before_reply_on_3_of_4_nodes(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_steward): orig_send_reply = TestNode.sendReplyToClient def send_after_restart(self, reply, reqKey): self.restart_clientstack() @@ -23,9 +23,9 @@ def revert_origin_back(): node) patch_sendReplyToClient() - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, 1) revert_origin_back() diff --git a/plenum/test/zstack_tests/test_restart_clientstack_before_reply_on_4_of_4_nodes.py b/plenum/test/zstack_tests/test_restart_clientstack_before_reply_on_4_of_4_nodes.py index 9f67655a82..3e4338a7fd 100644 --- a/plenum/test/zstack_tests/test_restart_clientstack_before_reply_on_4_of_4_nodes.py +++ b/plenum/test/zstack_tests/test_restart_clientstack_before_reply_on_4_of_4_nodes.py @@ -1,13 +1,13 @@ import types -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from plenum.test.test_node import TestNode def test_restart_clientstack_before_reply_on_4_of_4_nodes(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_steward): orig_send_reply = TestNode.sendReplyToClient def send_after_restart(self, reply, reqKey): self.restart_clientstack() @@ -23,9 +23,9 @@ def revert_origin_back(): node) patch_sendReplyToClient() - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, 1) revert_origin_back() diff --git a/plenum/test/zstack_tests/test_send_client_msgs_with_delay_reqs.py b/plenum/test/zstack_tests/test_send_client_msgs_with_delay_reqs.py index 8dc0c3e035..d59a15cebd 100644 --- a/plenum/test/zstack_tests/test_send_client_msgs_with_delay_reqs.py +++ b/plenum/test/zstack_tests/test_send_client_msgs_with_delay_reqs.py @@ -3,9 +3,9 @@ from plenum.common.constants import TRUSTEE_STRING from plenum.common.exceptions import RequestRejectedException -from plenum.test.helper import sdk_get_and_check_replies, assertExp, sdk_send_random_requests +from plenum.test.helper import vdr_get_and_check_replies, assertExp, vdr_send_random_requests from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data, waitNodeDataEquality -from plenum.test.pool_transactions.helper import sdk_add_new_nym, sdk_add_new_steward_and_node, sdk_pool_refresh +from plenum.test.pool_transactions.helper import vdr_add_new_nym, vdr_add_new_steward_and_node, vdr_pool_refresh from plenum.test.test_node import checkNodesConnected from stp_core.common.log import getlogger from stp_core.loop.eventually import eventually @@ -15,9 +15,9 @@ def test_resending_pending_client_msgs(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - sdk_wallet_steward, + vdr_pool_handle, + 
vdr_wallet_client, + vdr_wallet_steward, tdir, tconf, allPluginsPath, monkeypatch): problem_node = txnPoolNodeSet[1] @@ -34,9 +34,9 @@ def fail_send_multipart(msg_parts, flags=0, copy=True, track=False, **kwargs): start_master_last_ordered_3pc = txnPoolNodeSet[0].master_last_ordered_3PC[1] # Send the first request. Nodes should reject it. - resp_task = sdk_add_new_nym(looper, - sdk_pool_handle, - sdk_wallet_client, + resp_task = vdr_add_new_nym(looper, + vdr_pool_handle, + vdr_wallet_client, role=TRUSTEE_STRING, no_wait=True) looper.run( @@ -47,12 +47,12 @@ def fail_send_multipart(msg_parts, flags=0, copy=True, track=False, **kwargs): monkeypatch.delattr(problem_node.clientstack.listener, 'send_multipart', raising=True) # Send the second request. - sdk_reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) + sdk_reqs = vdr_send_random_requests(looper, vdr_pool_handle, vdr_wallet_client, 1) # Waiting reject for the first request, which will sent with a reply for the second request. with pytest.raises(RequestRejectedException, match="Only Steward is allowed to do these transactions"): - _, resp = sdk_get_and_check_replies(looper, [resp_task])[0] + _, resp = vdr_get_and_check_replies(looper, [resp_task])[0] # Waiting a rely for the second request - sdk_get_and_check_replies(looper, sdk_reqs) + vdr_get_and_check_replies(looper, sdk_reqs) monkeypatch.undo() diff --git a/plenum/test/zstack_tests/test_send_too_many_reqs.py b/plenum/test/zstack_tests/test_send_too_many_reqs.py index a6e8c35698..b1acafbd01 100644 --- a/plenum/test/zstack_tests/test_send_too_many_reqs.py +++ b/plenum/test/zstack_tests/test_send_too_many_reqs.py @@ -3,7 +3,7 @@ import pytest from plenum.common.exceptions import PoolLedgerTimeoutException -from plenum.test.helper import sdk_send_random_and_check +from plenum.test.helper import vdr_send_random_and_check from stp_core.common.log import getlogger logger = getlogger() @@ -26,13 +26,13 @@ def limitTestRunningTime(): @pytest.mark.skip(reason="Too much request. Needs for checking future implementation") def test_send_too_much_reqs(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_steward): for _ in range(TXN_COUNT): - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, 1) @@ -58,17 +58,17 @@ def check_perf(self): @pytest.mark.skip(reason="Too much request. 
Needs for checking future implementation") def test_send_with_clientstack_restarts(looper, patched_pool_set, - sdk_pool_handle, - sdk_wallet_steward): + vdr_pool_handle, + vdr_wallet_steward): success_txns = 0 failed_txns = 0 for _ in range(int(TXN_COUNT)): try: - sdk_send_random_and_check(looper, + vdr_send_random_and_check(looper, patched_pool_set, - sdk_pool_handle, - sdk_wallet_steward, + vdr_pool_handle, + vdr_wallet_steward, 1) except PoolLedgerTimeoutException: failed_txns += 1 diff --git a/plenum/test/zstack_tests/test_zstack_reconnection.py b/plenum/test/zstack_tests/test_zstack_reconnection.py index aa30d55786..fa1b4da18d 100644 --- a/plenum/test/zstack_tests/test_zstack_reconnection.py +++ b/plenum/test/zstack_tests/test_zstack_reconnection.py @@ -3,7 +3,7 @@ from stp_core.common.log import getlogger from stp_core.loop.eventually import eventually -from plenum.test.helper import stopNodes, sdk_send_random_and_check +from plenum.test.helper import stopNodes, vdr_send_random_and_check from plenum.test.test_node import TestNode, ensureElectionsDone from plenum.common.config_helper import PNodeConfigHelper @@ -19,11 +19,11 @@ def tconf(tconf): def testZStackNodeReconnection(tconf, looper, txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, + vdr_pool_handle, + vdr_wallet_client, tdir): - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 1) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 1) npr = [n for n in txnPoolNodeSet if not n.hasPrimary] nodeToCrash = npr[0] @@ -63,5 +63,5 @@ def checkFlakyConnected(conn=True): ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet, exclude_from_check=['check_last_ordered_3pc_backup']) - sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, - sdk_wallet_client, 10) + vdr_send_random_and_check(looper, txnPoolNodeSet, vdr_pool_handle, + vdr_wallet_client, 10) diff --git a/scripts/add_json_txns_to_ledger.py b/scripts/add_json_txns_to_ledger.py index 8c1d0e2df4..d66539af85 100644 --- a/scripts/add_json_txns_to_ledger.py +++ b/scripts/add_json_txns_to_ledger.py @@ -6,9 +6,9 @@ import argparse from stp_core.types import HA -from indy_common.config_util import getConfig +from plenum.common.config_util import getConfig from plenum.server.node import Node -from indy_common.config_helper import NodeConfigHelper +from plenum.common.config_helper import NodeConfigHelper config = getConfig() diff --git a/scripts/generate_txns.py b/scripts/generate_txns.py index be586a6c9c..e8c3ef3300 100644 --- a/scripts/generate_txns.py +++ b/scripts/generate_txns.py @@ -8,8 +8,7 @@ from contextlib import ExitStack from typing import Sequence -from indy import did, wallet -from indy.ledger import sign_request +from plenum.test.wallet_helper import vdr_create_and_store_did, vdr_sign_request, vdr_wallet_helper from plenum.common.config_util import getConfig from plenum.common.constants import CURRENT_PROTOCOL_VERSION @@ -22,25 +21,12 @@ async def get_wallet_and_pool(): - pool_name = 'pool' + randomString(3) - wallet_name = 'wallet' + randomString(10) - their_wallet_name = 'their_wallet' + randomString(10) seed_trustee1 = "000000000000000000000000Trustee1" - await wallet.create_wallet(pool_name, wallet_name, None, None, None) - my_wallet_handle = await wallet.open_wallet(wallet_name, None, None) + wallet_handle, _, _ = await vdr_wallet_helper() + did, _ = await vdr_create_and_store_did(wallet_handle, seed_trustee1) - await wallet.create_wallet(pool_name, their_wallet_name, 
None, None, None) - their_wallet_handle = await wallet.open_wallet(their_wallet_name, None, None) - - await did.create_and_store_my_did(my_wallet_handle, "{}") - - (their_did, their_verkey) = await did.create_and_store_my_did(their_wallet_handle, - json.dumps({"seed": seed_trustee1})) - - await did.store_their_did(my_wallet_handle, json.dumps({'did': their_did, 'verkey': their_verkey})) - - return their_wallet_handle, their_did + return wallet_handle, did def randomOperation(): @@ -67,7 +53,7 @@ def sdk_random_request_objects(count, protocol_version, identifier=None): def sdk_sign_request_objects(looper, sdk_wallet, reqs: Sequence): wallet_h, did = sdk_wallet reqs_str = [json.dumps(req.as_dict) for req in reqs] - resp = [looper.loop.run_until_complete(sign_request(wallet_h, did, req)) for req in reqs_str] + resp = [looper.loop.run_until_complete(vdr_sign_request(wallet_h, did, req)) for req in reqs_str] return resp diff --git a/setup.py b/setup.py index 96aa05298d..02bcab1062 100644 --- a/setup.py +++ b/setup.py @@ -27,8 +27,8 @@ with open(metadata['__file__'], 'r') as f: exec(f.read(), metadata) -tests_require = ['attrs==20.3.0', 'pytest==6.2.5', 'pytest-xdist==2.2.1', 'pytest-forked==1.3.0', - 'python3-indy==1.16.0.post236', 'pytest-asyncio==0.14.0'] +tests_require = ['attrs==20.3.0', 'pytest==6.2.5', 'pytest-xdist==2.2.0', 'pytest-forked==1.4.0', 'python3-indy==1.16.0.post236', + 'pytest-asyncio==0.14.0', 'indy_vdr==0.4.2', 'aries-askar==0.4.3', 'indy-credx==1.1.1', 'ioflo==2.0.3'] class PyZMQCommand(distutils.cmd.Command): @@ -100,7 +100,6 @@ def run(self): # pinned because issue with fpm from v4.0.0 'importlib_metadata==3.10.1', # 'ioflo==2.0.2', - 'ioflo', # 'jsonpickle==2.0.0', 'jsonpickle', # 'leveldb==0.201', @@ -136,6 +135,7 @@ def run(self): 'sortedcontainers==2.1.0', ### Tests fail without version pin (GHA run: https://github.com/udosson/indy-plenum/actions/runs/1078741118) 'ujson==1.33', + 'ioflo==2.0.3' ], setup_requires=['pytest-runner==5.3.0'],
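Note on the new indy-vdr test plumbing: the `vdr_*` fixtures consumed throughout the tests above (`vdr_pool_handle`, `vdr_wallet_client`, `vdr_wallet_steward`, `vdr_create_node_and_not_start`) are defined outside this excerpt, so their exact wiring is not shown here. The sketch below is only an illustration of how the helpers introduced in `plenum/test/wallet_helper.py` are expected to compose into one signed ledger write on top of indy-vdr and aries-askar; the `demo_nym_write` function, the trustee-seed bootstrap, and the assumption of a reachable pool behind the bundled `docker_genesis` file are illustrative, not part of this change.

import asyncio

from indy_vdr import ledger
from plenum.test.wallet_helper import (
    vdr_create_and_store_did,
    vdr_pool_helper,
    vdr_sign_and_submit_request,
    vdr_wallet_helper,
)

TRUSTEE_SEED = "000000000000000000000000Trustee1"


async def demo_nym_write():
    # Open the pool from the genesis file shipped next to wallet_helper.py
    # (assumes such a pool is actually reachable in the test environment).
    pool_handle, _pool_name = await vdr_pool_helper()

    # Provision an in-memory Askar wallet and derive the well-known Trustee
    # DID from its seed, mirroring how generate_txns.py above uses the helper.
    wallet_handle, _config, _credentials = await vdr_wallet_helper()
    trustee_did, _trustee_verkey = await vdr_create_and_store_did(
        wallet_handle, seed=TRUSTEE_SEED)

    # Create a fresh DID locally, then register it on the ledger with a NYM
    # request built by indy-vdr and signed with the Askar-held trustee key.
    new_did, new_verkey = await vdr_create_and_store_did(wallet_handle)
    request = ledger.build_nym_request(trustee_did, new_did, verkey=new_verkey)
    return await vdr_sign_and_submit_request(
        pool_handle, wallet_handle, trustee_did, request)


if __name__ == "__main__":
    print(asyncio.run(demo_nym_write()))

In the test suite itself these steps would presumably sit behind pytest fixtures and be driven through the looper rather than asyncio.run, which is why the tests above only receive the pre-built vdr_pool_handle and vdr_wallet_* handles as arguments.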