diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 0000000..23f44c4 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,2 @@ +"documentation": + - /**/*.adoc \ No newline at end of file diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml new file mode 100644 index 0000000..c4810a7 --- /dev/null +++ b/.github/release-drafter.yml @@ -0,0 +1,34 @@ +name-template: $NEXT_PATCH_VERSION +tag-template: $NEXT_PATCH_VERSION + +template: | + # Changes + $CHANGES + +# -------- +# NOTE: When adding new labels please also update required-labels.yml workflow. +# -------- +categories: + - title: 💣️ Breaking changes + label: breaking-change + + - title: 🚀 Features & Enhancements + labels: + - feature + - enhancement + + - title: 🐞 Fixes + label: bug + + - title: 📁 Java Dependencies updates + label: dependencies + + - title: 📁 Docker images updates + label: docker-update-images + + - title: 📖 Documentation + label: documentation + + - title: 🏡 Housekeeping + label: housekeeping + diff --git a/.github/workflows/changelog-release-drafter.yml b/.github/workflows/changelog-release-drafter.yml new file mode 100644 index 0000000..8886f47 --- /dev/null +++ b/.github/workflows/changelog-release-drafter.yml @@ -0,0 +1,14 @@ +name: Changelog Release Drafter + +on: + push: + branches: + - develop + +jobs: + update_release_draft: + runs-on: ubuntu-latest + steps: + - uses: release-drafter/release-drafter@v5 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 0000000..40cc30d --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,27 @@ +name: "Trivy" + +on: + schedule: + - cron: '24 10 * * 5' + +jobs: + build: + name: Trivy vulnerability scanner + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run Trivy vulnerability scanner in repo mode + uses: aquasecurity/trivy-action@master + with: + scan-type: 'fs' + ignore-unfixed: true + format: 'sarif' + output: 'trivy-results.sarif' + severity: 'CRITICAL' + + - name: Upload Trivy scan results to GitHub Security tab + uses: github/codeql-action/upload-sarif@v2 + with: + sarif_file: 'trivy-results.sarif' diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml new file mode 100644 index 0000000..3807d5a --- /dev/null +++ b/.github/workflows/labeler.yml @@ -0,0 +1,14 @@ +name: "Pull Request Auto Labeler" +on: + - pull_request_target + +jobs: + triage: + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v4 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" \ No newline at end of file diff --git a/.github/workflows/maven.yml b/.github/workflows/maven.yml new file mode 100644 index 0000000..c6524ca --- /dev/null +++ b/.github/workflows/maven.yml @@ -0,0 +1,39 @@ +# This workflow will build a Java project with Maven +# For more information see: https://help.github.com/actions/language-and-framework-guides/building-and-testing-java-with-maven + +name: Java CI with Maven + +on: + push: + branches: + - develop + pull_request: + branches: + - develop + +jobs: + build-jdk17: + runs-on: ubuntu-latest + name: Build project + concurrency: + # The commit SHA or the branch name of the pull request. 
See: https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions + group: ${{ github.event_name == 'pull_request' && github.head_ref || github.sha}} + cancel-in-progress: true + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Cache Maven packages + uses: actions/cache@v3 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2 + - name: Set up JDK + uses: actions/setup-java@v3 + with: + distribution: 'zulu' + java-version: '17' + - name: Build with Maven + run: ./mvnw -version && whoami && umask -S && umask a+rw && umask -S && ./mvnw clean verify -P docker-clean -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.count=3 --no-snapshot-updates --batch-mode --no-transfer-progress \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..c318d82 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,47 @@ +name: Publish to the Maven Central Repository + +on: + release: + types: [ published ] + +jobs: + publish: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{github.event.release.target_commitish}} + token: ${{ secrets.RELEASE_PERSONAL_ACCESS_TOKEN }} + + - name: Set up JDK + uses: actions/setup-java@v3 + with: + distribution: 'zulu' + java-version: '17' + server-id: ossrh + server-username: MAVEN_USERNAME + server-password: MAVEN_PASSWORD + gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }} + gpg-passphrase: MAVEN_GPG_PASSPHRASE + cache: 'maven' + + - name: Update version + if: ${{ success() }} + run: ./mvnw --batch-mode --no-transfer-progress versions:set -DnewVersion=${{github.event.release.tag_name}} versions:commit + + - name: Publish to the Maven Central Repository + if: ${{ success() }} + run: ./mvnw --batch-mode --no-transfer-progress -Dgib.disable=true -P ossrh -DskipTests deploy + env: + MAVEN_USERNAME: ${{ secrets.OSSRH_USERNAME }} + MAVEN_PASSWORD: ${{ secrets.OSSRH_TOKEN }} + MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} + + - name: Commit & Push changes + if: ${{ success() }} + uses: actions-js/push@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + message: 'Release ${{github.event.release.tag_name}}' + branch: ${{ github.event.release.target_commitish }} diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml new file mode 100644 index 0000000..e31de9e --- /dev/null +++ b/.github/workflows/renovate.yml @@ -0,0 +1,39 @@ +name: Renovate for update docker images + +on: + workflow_dispatch: + inputs: + dryRun: + description: "Dry-Run" + default: false + required: false + type: boolean + logLevel: + description: "Log-Level" + required: false + default: 'debug' + type: choice + options: + - info + - warn + - debug + - error + - fatal + schedule: + - cron: '0 8 * * *' + +jobs: + renovate: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Self-hosted Renovate + uses: renovatebot/github-action@v39.0.5 + with: + configurationFile: .github/renovate/renovate.json + token: ${{ secrets.RELEASE_PERSONAL_ACCESS_TOKEN }} + env: + DRY_RUN: ${{ inputs.dryRun || 'false' }} + LOG_LEVEL: ${{ inputs.logLevel || 'debug' }} \ No newline at end of file diff --git a/.github/workflows/required-labels.yml b/.github/workflows/required-labels.yml new file mode 100644 index 0000000..96110b2 --- /dev/null +++ 
b/.github/workflows/required-labels.yml @@ -0,0 +1,16 @@ +# https://github.com/mheap/github-action-required-labels +name: Pull Request Required Labels +on: + pull_request: + types: [ opened, labeled, unlabeled, synchronize ] +jobs: + label: + if: github.event.pull_request.state == 'open' + runs-on: ubuntu-latest + name: Verify Pull Request has labels + steps: + - uses: mheap/github-action-required-labels@v5 + with: + mode: minimum + count: 1 + labels: "breaking-change, feature, enhancement, bug, dependencies, docker-update-images, documentation, housekeeping" \ No newline at end of file diff --git a/.mvn/wrapper/maven-wrapper.jar b/.mvn/wrapper/maven-wrapper.jar new file mode 100644 index 0000000..cb28b0e Binary files /dev/null and b/.mvn/wrapper/maven-wrapper.jar differ diff --git a/.mvn/wrapper/maven-wrapper.properties b/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000..eacdc9e --- /dev/null +++ b/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.5/apache-maven-3.9.5-bin.zip +wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md index a9ecb04..0a68c6e 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ +[![codecov](https://codecov.io/gh/Playtika/nosql-batch-updater/branch/master/graph/badge.svg)](https://codecov.io/gh/Playtika/nosql-batch-updater) +[![Maven Central](https://maven-badges.herokuapp.com/maven-central/com.playtika.nosql/batch-updater-parent/badge.svg)](https://maven-badges.herokuapp.com/maven-central/com.playtika.nosql/batch-updater-parent) # nosql-batch-updater Allows to run batch updates on NoSql DBs with eventually consistent guarantee. Some NoSql DBs (like Cassandra) already have built-in batch update mechanism but most of them have no such option. 
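The Aerospike modules added below implement that mechanism on top of a write-ahead log plus per-key locks. The following sketch is editorial and not part of the diff: it shows how the classes introduced here could be wired together. The namespace/set names, executor sizes and the reconstructed generic type parameters are assumptions (the generics are inferred from the constructors shown), and the BatchUpdater entry point used to actually submit a batch lives in the core batch-updater module, outside this change set.

```java
import com.aerospike.client.Bin;
import com.aerospike.client.IAerospikeClient;
import com.aerospike.client.Key;
import com.aerospike.client.Value;
import nosql.batch.update.BatchOperations;
import nosql.batch.update.aerospike.basic.AerospikeBasicBatchUpdate;
import nosql.batch.update.aerospike.basic.Record;
import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks;
import nosql.batch.update.aerospike.basic.wal.AerospikeBasicWalCompleter;
import nosql.batch.update.aerospike.lock.AerospikeLock;
import nosql.batch.update.wal.WriteAheadLogCompleter;

import java.time.Clock;
import java.time.Duration;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import static java.util.Collections.singletonList;
import static nosql.batch.update.aerospike.basic.AerospikeBasicBatchUpdater.basicOperations;

public class BasicUsageSketch {

    static void wire(IAerospikeClient client) {
        ExecutorService aerospikeExecutor = Executors.newFixedThreadPool(8);
        ExecutorService batchExecutor = Executors.newFixedThreadPool(8);

        // Combines the WAL manager, lock operations and update operations added in this change.
        BatchOperations<AerospikeBasicBatchLocks, List<Record>, AerospikeLock, Value> operations =
                basicOperations(client, "test", "wal", Clock.systemUTC(),
                        aerospikeExecutor, batchExecutor);

        // Background completer that finishes stale batches found in the WAL; it takes the
        // exclusive WAL lock so only one node performs completion at a time.
        WriteAheadLogCompleter<AerospikeBasicBatchLocks, List<Record>, AerospikeLock, Value> completer =
                AerospikeBasicWalCompleter.basicCompleter(operations, Duration.ofSeconds(10), 100);

        // A batch: the lock section carries the values expected before the update is applied,
        // the update section carries the new values to write.
        Key userKey = new Key("test", "users", "user-1");
        Record expected = new Record(userKey, singletonList(new Bin("balance", 10L)));
        Record update = new Record(userKey, singletonList(new Bin("balance", 20L)));
        AerospikeBasicBatchUpdate batch = new AerospikeBasicBatchUpdate(
                new AerospikeBasicBatchLocks(singletonList(expected)),
                singletonList(update));
        // `batch` and `operations` would then be handed to the core module's BatchUpdater
        // (imported by the tests in this diff) to run the locked, WAL-backed update.
    }
}
```

If the bins read under the acquired locks differ from the expected record, the check in AerospikeBasicExpectedValueOperations aborts the batch with a PermanentLockingException rather than overwriting a concurrent change.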
diff --git a/aerospike-batch-updater/pom.xml b/aerospike-batch-updater/pom.xml new file mode 100644 index 0000000..604d243 --- /dev/null +++ b/aerospike-batch-updater/pom.xml @@ -0,0 +1,86 @@ + + + + 4.0.0 + + + com.playtika.nosql + batch-updater-parent + 0.0.22 + + + aerospike-batch-updater + jar + Batch updates on Aerospike + + + + com.aerospike + aerospike-client + provided + + + + com.playtika.nosql + batch-updater + ${project.version} + + + + org.slf4j + slf4j-api + + + + + com.playtika.nosql + batch-updater + test-jar + test + + + + com.playtika.nosql + aerospike-container + test + + + + junit + junit + test + + + + org.assertj + assertj-core + test + + + + org.apache.logging.log4j + log4j-slf4j-impl + test + + + + org.awaitility + awaitility + test + + + \ No newline at end of file diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicBatchUpdate.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicBatchUpdate.java new file mode 100644 index 0000000..22a468a --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicBatchUpdate.java @@ -0,0 +1,27 @@ +package nosql.batch.update.aerospike.basic; + +import nosql.batch.update.BatchUpdate; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; + +import java.util.List; + +public class AerospikeBasicBatchUpdate implements BatchUpdate> { + + private final AerospikeBasicBatchLocks locks; + private final List updates; + + public AerospikeBasicBatchUpdate(AerospikeBasicBatchLocks locks, List updates) { + this.locks = locks; + this.updates = updates; + } + + @Override + public AerospikeBasicBatchLocks locks() { + return locks; + } + + @Override + public List updates() { + return updates; + } +} diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicBatchUpdateSerde.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicBatchUpdateSerde.java new file mode 100644 index 0000000..52637b3 --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicBatchUpdateSerde.java @@ -0,0 +1,78 @@ +package nosql.batch.update.aerospike.basic; + +import com.aerospike.client.Bin; +import com.aerospike.client.Key; +import com.aerospike.client.Value; +import nosql.batch.update.BatchUpdate; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.wal.AerospikeBatchUpdateSerde; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static com.aerospike.client.Value.get; + +public class AerospikeBasicBatchUpdateSerde + implements AerospikeBatchUpdateSerde, List> { + + private static final String EXPECTED_VALUES_BIN_NAME = "expected_values"; + private static final String UPDATES_BIN_NAME = "updates"; + + @Override + public List write(BatchUpdate> batch) { + return Arrays.asList( + new Bin(EXPECTED_VALUES_BIN_NAME, recordsToValue(batch.locks().expectedValues())), + new Bin(UPDATES_BIN_NAME, recordsToValue(batch.updates()))); + } + + @Override + public BatchUpdate> read(Map bins) { + return new AerospikeBasicBatchUpdate( + new AerospikeBasicBatchLocks(recordsFromValue(bins.get(EXPECTED_VALUES_BIN_NAME))), + recordsFromValue(bins.get(UPDATES_BIN_NAME))); + } + + private static Value recordsToValue(List records){ + 
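// serialize each Record via recordToValue and wrap the result in a single list Value, so the whole list is stored in one WAL bin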
return get(records.stream() + .map(AerospikeBasicBatchUpdateSerde::recordToValue) + .collect(Collectors.toList())); + } + + private static Value recordToValue(Record record){ + List recordValues = new ArrayList<>(); + recordValues.add(get(record.key.namespace)); + recordValues.add(get(record.key.setName)); + recordValues.add(record.key.userKey); + for(Bin bin : record.bins){ + recordValues.add(get(bin.name)); + recordValues.add(bin.value); + } + return get(recordValues); + } + + private static List recordsFromValue(Object value){ + List listOfRecords = (List) value; + List records = new ArrayList<>(listOfRecords.size()); + for(Object record : listOfRecords){ + records.add(recordFromValues((List) record)); + } + return records; + } + + private static Record recordFromValues(List recordValue){ + Iterator it = recordValue.iterator(); + Key key = new Key((String) it.next(), + (String) it.next(), + get(it.next())); + List bins = new ArrayList<>((recordValue.size() - 3) / 2); + while (it.hasNext()) { + bins.add(new Bin((String)it.next(), Value.get(it.next()))); + } + + return new Record(key, bins); + } +} diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicBatchUpdater.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicBatchUpdater.java new file mode 100644 index 0000000..945a619 --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicBatchUpdater.java @@ -0,0 +1,57 @@ +package nosql.batch.update.aerospike.basic; + +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.Value; +import nosql.batch.update.BatchOperations; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.aerospike.lock.AerospikeLockOperations; +import nosql.batch.update.aerospike.wal.AerospikeWriteAheadLogManager; + +import java.time.Clock; +import java.util.List; +import java.util.concurrent.ExecutorService; + +public class AerospikeBasicBatchUpdater { + + public static BatchOperations, AerospikeLock, Value> basicOperations( + IAerospikeClient client, + String walNamespace, + String walSetName, + Clock clock, + ExecutorService aerospikeExecutorService, + ExecutorService batchExecutorService){ + + AerospikeWriteAheadLogManager, List> walManager = + basicWalManager(client, walNamespace, walSetName, clock); + + AerospikeLockOperations> lockOperations = + basicLockOperations(client, aerospikeExecutorService); + + AerospikeBasicUpdateOperations updateOperations = basicUpdateOperations(client, aerospikeExecutorService); + + return new BatchOperations<>(walManager, lockOperations, updateOperations, batchExecutorService); + } + + public static AerospikeBasicUpdateOperations basicUpdateOperations( + IAerospikeClient client, ExecutorService executorService) { + return new AerospikeBasicUpdateOperations(client, executorService); + } + + public static AerospikeLockOperations> basicLockOperations( + IAerospikeClient reactorClient, + ExecutorService aerospikeExecutorService) { + return new AerospikeLockOperations<>( + reactorClient, + new AerospikeBasicExpectedValueOperations(reactorClient), + aerospikeExecutorService); + } + + public static AerospikeWriteAheadLogManager, List> basicWalManager( + IAerospikeClient client, String walNamespace, String walSetName, Clock clock) { + return new AerospikeWriteAheadLogManager<>( + client, walNamespace, walSetName, + new 
AerospikeBasicBatchUpdateSerde(), clock); + } + +} diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicExpectedValueOperations.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicExpectedValueOperations.java new file mode 100644 index 0000000..469d3b2 --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicExpectedValueOperations.java @@ -0,0 +1,64 @@ +package nosql.batch.update.aerospike.basic; + +import com.aerospike.client.BatchRead; +import com.aerospike.client.Bin; +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.Value; +import nosql.batch.update.aerospike.lock.AerospikeExpectedValuesOperations; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.lock.Lock; +import nosql.batch.update.lock.PermanentLockingException; + +import java.util.ArrayList; +import java.util.List; + + +public class AerospikeBasicExpectedValueOperations implements AerospikeExpectedValuesOperations> { + + private final IAerospikeClient client; + + public AerospikeBasicExpectedValueOperations(IAerospikeClient client) { + this.client = client; + } + + @Override + public void checkExpectedValues(List locks, List expectedValues) throws PermanentLockingException { + + if(locks.size() != expectedValues.size()){ + throw new IllegalArgumentException("locks.size() != expectedValues.size()"); + } + + List batchReads = new ArrayList<>(expectedValues.size()); + List expectedValuesToCheck = new ArrayList<>(expectedValues.size()); + for(int i = 0, n = expectedValues.size(); i < n; i++){ + if(locks.get(i).lockType == Lock.LockType.SAME_BATCH){ + continue; + } + Record record = expectedValues.get(i); + batchReads.add(new BatchRead(record.key, record.bins.stream() + .map(bin -> bin.name) + .toArray(String[]::new))); + expectedValuesToCheck.add(record); + } + + client.get(null, batchReads); + for(int i = 0, n = expectedValuesToCheck.size(); i < n; i++){ + checkValues(batchReads.get(i), expectedValuesToCheck.get(i)); + } + } + + private void checkValues(BatchRead batchRead, Record expectedValues) throws PermanentLockingException { + for(Bin bin : expectedValues.bins){ + Object actualValue = batchRead.record != null ? 
batchRead.record.getValue(bin.name) : null; + if(!equals(actualValue, bin.value)){ + throw new PermanentLockingException(String.format( + "Unexpected value: bin=[%s], expected=[%s], actual=[%s]", + bin.name, bin.value, actualValue)); + } + } + } + + private boolean equals(Object actualValue, Value expectedValue) { + return expectedValue.equals(Value.get(actualValue)); + } +} diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicUpdateOperations.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicUpdateOperations.java new file mode 100644 index 0000000..58f6937 --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/AerospikeBasicUpdateOperations.java @@ -0,0 +1,40 @@ +package nosql.batch.update.aerospike.basic; + +import com.aerospike.client.Bin; +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.policy.WritePolicy; +import nosql.batch.update.UpdateOperations; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; + +import static java.util.concurrent.CompletableFuture.allOf; +import static java.util.concurrent.CompletableFuture.runAsync; + +public class AerospikeBasicUpdateOperations implements UpdateOperations> { + + private final IAerospikeClient client; + private final WritePolicy writePolicy; + private final ExecutorService executorService; + + public AerospikeBasicUpdateOperations(IAerospikeClient client, ExecutorService executorService) { + this.client = client; + this.writePolicy = client.getWritePolicyDefault(); + this.executorService = executorService; + } + + @Override + public void updateMany(List batchOfUpdates, boolean calledByWal) { + List> futures = new ArrayList<>(batchOfUpdates.size()); + for(Record record : batchOfUpdates){ + futures.add(runAsync(() -> update(record), executorService)); + } + allOf(futures.toArray(new CompletableFuture[0])).join(); + } + + private void update(Record record){ + client.put(writePolicy, record.key, record.bins.toArray(new Bin[0])); + } +} diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/Record.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/Record.java new file mode 100644 index 0000000..347acd5 --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/Record.java @@ -0,0 +1,18 @@ +package nosql.batch.update.aerospike.basic; + +import com.aerospike.client.Bin; +import com.aerospike.client.Key; + +import java.util.List; + +public class Record { + + public final Key key; + public final List bins; + + public Record(Key key, List bins) { + this.key = key; + this.bins = bins; + } + +} diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/lock/AerospikeBasicBatchLocks.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/lock/AerospikeBasicBatchLocks.java new file mode 100644 index 0000000..f0c17fe --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/lock/AerospikeBasicBatchLocks.java @@ -0,0 +1,33 @@ +package nosql.batch.update.aerospike.basic.lock; + +import com.aerospike.client.Key; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.lock.AerospikeBatchLocks; + +import java.util.List; +import java.util.stream.Collectors; + +public class AerospikeBasicBatchLocks 
implements AerospikeBatchLocks> { + + private final List records; + + public AerospikeBasicBatchLocks(List records) { + this.records = records; + } + + @Override + public List keysToLock() { + return records.stream() + .map(record -> toLockKey(record.key)) + .collect(Collectors.toList()); + } + + @Override + public List expectedValues() { + return records; + } + + public static Key toLockKey(Key key){ + return new Key(key.namespace, key.setName + ".lock", key.userKey); + } +} diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/wal/AerospikeBasicWalCompleter.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/wal/AerospikeBasicWalCompleter.java new file mode 100644 index 0000000..8341223 --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/basic/wal/AerospikeBasicWalCompleter.java @@ -0,0 +1,37 @@ +package nosql.batch.update.aerospike.basic.wal; + +import com.aerospike.client.Value; +import nosql.batch.update.BatchOperations; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.aerospike.wal.AerospikeExclusiveLocker; +import nosql.batch.update.aerospike.wal.AerospikeWriteAheadLogManager; +import nosql.batch.update.wal.WriteAheadLogCompleter; +import nosql.batch.update.wal.WriteAheadLogManager; + +import java.time.Duration; +import java.util.List; +import java.util.concurrent.Executors; + +public class AerospikeBasicWalCompleter { + + public static WriteAheadLogCompleter, AerospikeLock, Value> basicCompleter( + BatchOperations, AerospikeLock, Value> batchOperations, + Duration staleBatchesThreshold, int batchSize){ + WriteAheadLogManager, Value> writeAheadLogManager + = batchOperations.getWriteAheadLogManager(); + AerospikeWriteAheadLogManager aerospikeWriteAheadLogManager = (AerospikeWriteAheadLogManager)writeAheadLogManager; + + return new WriteAheadLogCompleter<>( + batchOperations, + staleBatchesThreshold, + batchSize, + new AerospikeExclusiveLocker( + aerospikeWriteAheadLogManager.getClient(), + aerospikeWriteAheadLogManager.getWalNamespace(), + aerospikeWriteAheadLogManager.getWalSetName()), + Executors.newScheduledThreadPool(1) + ); + } + +} diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/lock/AerospikeBatchLocks.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/lock/AerospikeBatchLocks.java new file mode 100644 index 0000000..f2b1ea7 --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/lock/AerospikeBatchLocks.java @@ -0,0 +1,12 @@ +package nosql.batch.update.aerospike.lock; + +import com.aerospike.client.Key; + +import java.util.List; + +public interface AerospikeBatchLocks { + + List keysToLock(); + EV expectedValues(); + +} diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/lock/AerospikeExpectedValuesOperations.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/lock/AerospikeExpectedValuesOperations.java new file mode 100644 index 0000000..722dee9 --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/lock/AerospikeExpectedValuesOperations.java @@ -0,0 +1,11 @@ +package nosql.batch.update.aerospike.lock; + +import nosql.batch.update.lock.PermanentLockingException; + +import java.util.List; + +public interface AerospikeExpectedValuesOperations{ + + void checkExpectedValues(List locks, EV expectedValues) throws 
PermanentLockingException; + +} diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/lock/AerospikeLock.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/lock/AerospikeLock.java new file mode 100644 index 0000000..ae657a9 --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/lock/AerospikeLock.java @@ -0,0 +1,26 @@ +package nosql.batch.update.aerospike.lock; + +import com.aerospike.client.Key; +import nosql.batch.update.lock.Lock; + +public class AerospikeLock extends Lock { + + public final Key key; + + public AerospikeLock(LockType lockType, Key key) { + super(lockType); + this.key = key; + } + + @Override + public boolean equals(Object o){ + AerospikeLock aerospikeLock = (AerospikeLock)o; + return aerospikeLock.lockType == lockType + && aerospikeLock.key.equals(key); + } + + @Override + public int hashCode(){ + return key.hashCode(); + } +} diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/lock/AerospikeLockOperations.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/lock/AerospikeLockOperations.java new file mode 100644 index 0000000..0c67151 --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/lock/AerospikeLockOperations.java @@ -0,0 +1,236 @@ +package nosql.batch.update.aerospike.lock; + +import com.aerospike.client.AerospikeException; +import com.aerospike.client.Bin; +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.Key; +import com.aerospike.client.Record; +import com.aerospike.client.ResultCode; +import com.aerospike.client.Value; +import com.aerospike.client.policy.RecordExistsAction; +import com.aerospike.client.policy.WritePolicy; +import nosql.batch.update.lock.LockOperations; +import nosql.batch.update.lock.LockingException; +import nosql.batch.update.lock.PermanentLockingException; +import nosql.batch.update.lock.TemporaryLockingException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Collections.singletonList; +import static java.util.concurrent.CompletableFuture.allOf; +import static java.util.concurrent.CompletableFuture.runAsync; +import static java.util.concurrent.CompletableFuture.supplyAsync; +import static nosql.batch.update.lock.Lock.LockType.LOCKED; +import static nosql.batch.update.lock.Lock.LockType.SAME_BATCH; + +public class AerospikeLockOperations, EV> implements LockOperations { + + private static final Logger logger = LoggerFactory.getLogger(AerospikeLockOperations.class); + + private static final String BATCH_ID_BIN_NAME = "batch_id"; + + private final IAerospikeClient aerospikeClient; + private final WritePolicy putLockPolicy; + private final WritePolicy deleteLockPolicy; + private final AerospikeExpectedValuesOperations expectedValuesOperations; + private final ExecutorService aerospikeExecutor; + + public AerospikeLockOperations(IAerospikeClient aerospikeClient, + AerospikeExpectedValuesOperations expectedValuesOperations, + ExecutorService aerospikeExecutor) { + this.putLockPolicy = configurePutLockPolicy(aerospikeClient.getWritePolicyDefault()); + this.aerospikeClient = aerospikeClient; + this.aerospikeExecutor = aerospikeExecutor; + this.deleteLockPolicy = putLockPolicy; + this.expectedValuesOperations = 
expectedValuesOperations; + } + + private WritePolicy configurePutLockPolicy(WritePolicy writePolicyDefault){ + WritePolicy writePolicy = new WritePolicy(writePolicyDefault); + writePolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; + writePolicy.expiration = -1; + return writePolicy; + } + + @Override + public List acquire(Value batchId, LOCKS batchLocks, boolean checkBatchId) throws LockingException { + List keysLocked = putLocks(batchId, batchLocks, checkBatchId); + checkExpectedValues(batchLocks, keysLocked); + return keysLocked; + } + + protected List putLocks( + Value batchId, + LOCKS batchLocks, + boolean checkTransactionId) throws TemporaryLockingException{ + + List keys = batchLocks.keysToLock(); + + if(keys.size() == 1){ + return singletonList(putLock(batchId, keys.get(0), checkTransactionId)); + } + + List>> futures = new ArrayList<>(keys.size()); + AtomicReference fail = new AtomicReference<>(); + for(Key lockKey : keys){ + futures.add(supplyAsync(() -> { + try { + if(fail.get() != null){ + return null; + } + AerospikeLock lock = putLock(batchId, lockKey, checkTransactionId); + return new LockResult<>(lock); + } catch (Throwable t) { + fail.set(t); + return new LockResult<>(t); + } + }, aerospikeExecutor)); + } + + allOf(futures.toArray(new CompletableFuture[0])).join(); + + return processResults(futures); + } + + static List processResults ( + List>> lockResults) throws LockingException { + List locks = new ArrayList<>(lockResults.size()); + Throwable resultError = null; + for(CompletableFuture> future : lockResults){ + LockResult lockResult = future.join(); + if(lockResult != null) { + if (lockResult.throwable != null) { + if (lockResult.throwable instanceof LockingException) { + if (resultError == null) { + resultError = lockResult.throwable; + } + } else { + //give priority to non LockingException + resultError = lockResult.throwable; + break; + } + } + locks.add(lockResult.value); + } + } + if(resultError != null){ + logger.error("Error while putting locks", resultError); + throw resultError instanceof LockingException + ? 
(LockingException)resultError + : new RuntimeException(resultError); + } + return locks; + } + + private AerospikeLock putLock(Value batchId, Key lockKey, boolean checkBatchId) throws TemporaryLockingException{ + try { + aerospikeClient.add(putLockPolicy, lockKey, new Bin(BATCH_ID_BIN_NAME, batchId)); + logger.trace("acquired lock key=[{}], batchId=[{}]", lockKey, batchId); + return new AerospikeLock(LOCKED, lockKey); + } catch (AerospikeException ae) { + if (ae.getResultCode() == ResultCode.KEY_EXISTS_ERROR) { + if (checkBatchId) { + Value actualBatchId = getBatchIdOfLock(lockKey); + if(batchId.equals(actualBatchId)){ + //check for same batch + //this is used only by WriteAheadLogCompleter to skip already locked keys + logger.info("Previously locked by this batch update key=[{}], batchId=[{}]", + lockKey, batchId); + return new AerospikeLock(SAME_BATCH, lockKey); + } else { + logger.error("Locked by other batch update key=[{}], batchId=[{}], actualBatchId=[{}]", + lockKey, batchId, actualBatchId); + throw new TemporaryLockingException(String.format( + "Locked by other batch update key=[%s], batchId=[%s], actualBatchId=[%s]", + lockKey, batchId, actualBatchId)); + } + } else { + Value batchIdLocked = getBatchIdOfLock(lockKey); + logger.info("Locked by concurrent update key=[{}], batchId=[{}], batchIdLocked=[{}]", + lockKey, batchId, batchIdLocked); + throw new TemporaryLockingException(String.format( + "Locked by concurrent update key=[%s], batchId=[%s], batchIdLocked=[%s]", + lockKey, batchId, batchIdLocked)); + } + } else { + logger.error("Unexpected error while acquiring lock key=[{}], batchId=[{}]", lockKey, batchId); + throw ae; + } + } + } + + protected void checkExpectedValues(LOCKS batchLocks, List keysLocked) throws PermanentLockingException { + expectedValuesOperations.checkExpectedValues(keysLocked, batchLocks.expectedValues()); + } + + private Value getBatchIdOfLock(Key lockKey){ + Record record = aerospikeClient.get(null, lockKey); + return getBatchId(record); + } + + private Value getBatchId(Record record) { + return record != null + ? 
Value.get(record.getValue(BATCH_ID_BIN_NAME)) : + //may have place if key get unlocked before we get response + Value.getAsNull(); + } + + @Override + public List getLockedByBatchUpdate(LOCKS aerospikeBatchLocks, Value batchId) { + List keys = aerospikeBatchLocks.keysToLock(); + + Key[] keysArray = keys.toArray(new Key[0]); + Record[] records = aerospikeClient.get(null, keysArray); + + List keysFiltered = new ArrayList<>(keys.size()); + for(int i = 0, m = keysArray.length; i < m; i++){ + Record record = records[i]; + if(record != null && batchId.equals(getBatchId(record))){ + keysFiltered.add(new AerospikeLock(SAME_BATCH, keysArray[i])); + } + } + return keysFiltered; + } + + @Override + public void release(List locks, Value batchId) { + if(locks.size() == 1){ + releaseLock(locks.get(0), batchId); + return; + } + + List> futures = new ArrayList<>(locks.size()); + for(AerospikeLock lock : locks){ + futures.add(runAsync(() -> releaseLock(lock, batchId), aerospikeExecutor)); + } + allOf(futures.toArray(new CompletableFuture[0])).join(); + } + + protected void releaseLock(AerospikeLock lock, Value batchId) { + aerospikeClient.delete(deleteLockPolicy, lock.key); + logger.trace("released lock key=[{}], batchId=[{}]", lock.key, batchId); + } + + public static class LockResult { + public final V value; + public final Throwable throwable; + + public LockResult(V value) { + this.value = value; + this.throwable = null; + } + + public LockResult(Throwable throwable) { + this.value = null; + this.throwable = throwable; + } + } + + +} diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/wal/AerospikeBatchUpdateSerde.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/wal/AerospikeBatchUpdateSerde.java new file mode 100644 index 0000000..eb46559 --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/wal/AerospikeBatchUpdateSerde.java @@ -0,0 +1,15 @@ +package nosql.batch.update.aerospike.wal; + +import com.aerospike.client.Bin; +import nosql.batch.update.BatchUpdate; +import nosql.batch.update.aerospike.lock.AerospikeBatchLocks; + +import java.util.List; +import java.util.Map; + +public interface AerospikeBatchUpdateSerde, UPDATES, EV> { + + List write(BatchUpdate batch); + + BatchUpdate read(Map bins); +} diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/wal/AerospikeExclusiveLocker.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/wal/AerospikeExclusiveLocker.java new file mode 100644 index 0000000..781d86c --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/wal/AerospikeExclusiveLocker.java @@ -0,0 +1,149 @@ +package nosql.batch.update.aerospike.wal; + +import com.aerospike.client.AerospikeException; +import com.aerospike.client.Bin; +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.Key; +import com.aerospike.client.ResultCode; +import com.aerospike.client.Value; +import com.aerospike.client.policy.GenerationPolicy; +import com.aerospike.client.policy.RecordExistsAction; +import com.aerospike.client.policy.WritePolicy; +import nosql.batch.update.wal.ExclusiveLocker; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.Duration; +import java.time.Instant; +import java.util.UUID; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import 
java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static java.time.temporal.ChronoUnit.SECONDS; +import static nosql.batch.update.aerospike.wal.AerospikeWriteAheadLogManager.getBytesFromUUID; +import static nosql.batch.update.util.AsyncUtil.shutdownAndAwaitTermination; + +public class AerospikeExclusiveLocker implements ExclusiveLocker { + + private static final Logger logger = LoggerFactory.getLogger(AerospikeExclusiveLocker.class); + + private static final Instant JAN_01_2010 = Instant.parse("2010-01-01T00:00:00.00Z"); + + private static final Value EXCLUSIVE_LOCK_KEY = Value.get((byte)0); + + private final IAerospikeClient client; + private final Duration exclusiveLockTtl; + private final ScheduledExecutorService scheduledExecutorService; + + private final WritePolicy putLockPolicy; + private final Bin exclusiveLockBin; + private final Key exclusiveLockKey; + private final AtomicInteger generation = new AtomicInteger(0); + private final AtomicReference scheduledFuture = new AtomicReference<>(); + + public AerospikeExclusiveLocker( + IAerospikeClient client, String namespace, String setName) { + this(client, namespace, setName, + Executors.newSingleThreadScheduledExecutor(), + Duration.ofSeconds(60)); + } + + public AerospikeExclusiveLocker( + IAerospikeClient client, String namespace, String setName, + ScheduledExecutorService scheduledExecutorService, Duration exclusiveLockTtl) { + this.client = client; + this.exclusiveLockTtl = exclusiveLockTtl; + this.scheduledExecutorService = scheduledExecutorService; + + this.putLockPolicy = buildPutLockPolicy(); + + this.exclusiveLockBin = new Bin("EL", getBytesFromUUID(UUID.randomUUID())); + + exclusiveLockKey = new Key(namespace, setName, EXCLUSIVE_LOCK_KEY); + } + + @Override + public boolean acquire(){ + if(generation.get() > 0){ + return true; + } + + try { + client.put(putLockPolicy, exclusiveLockKey, exclusiveLockBin); + generation.incrementAndGet(); + logger.info("Successfully got exclusive WAL lock"); + + scheduledFuture.set(scheduledExecutorService.scheduleAtFixedRate(this::upgradeLock, + exclusiveLockTtl.getSeconds() / 2, + exclusiveLockTtl.getSeconds() / 2, TimeUnit.SECONDS)); + + return true; + } catch (AerospikeException e){ + if(e.getResultCode() == ResultCode.KEY_EXISTS_ERROR){ + logger.debug("Failed to get exclusive WAL lock, will try later"); + int expiration = client.get(null, exclusiveLockKey).expiration; + logger.debug("WAL lock will be released at {}", JAN_01_2010.plus(expiration, SECONDS)); + return false; + } else { + logger.error("Failed while getting exclusive WAL lock", e); + throw e; + } + } + } + + @Override + public void release() { + if(generation.get() > 0){ + client.delete(null, exclusiveLockKey); + reset(); + } + } + + private WritePolicy buildPutLockPolicy(){ + WritePolicy putLockPolicy = new WritePolicy(); + putLockPolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; + putLockPolicy.expiration = (int) exclusiveLockTtl.get(SECONDS); + return putLockPolicy; + } + + private void upgradeLock(){ + try { + client.touch(buildTouchLockPolicy(), exclusiveLockKey); + generation.incrementAndGet(); + logger.info("Successfully upgraded WAL lock"); + } catch (AerospikeException e){ + logger.error("Failed while upgrading WAL lock", e); + //downgrade lock + reset(); + throw e; + } + } + + private void reset(){ + generation.set(0); + + if(scheduledFuture.get() != null){ + scheduledFuture.get().cancel(false); + scheduledFuture.set(null); + } + } + + private 
WritePolicy buildTouchLockPolicy(){ + WritePolicy touchLockPolicy = new WritePolicy(); + touchLockPolicy.recordExistsAction = RecordExistsAction.UPDATE_ONLY; + touchLockPolicy.generation = this.generation.get(); + touchLockPolicy.generationPolicy = GenerationPolicy.EXPECT_GEN_EQUAL; + touchLockPolicy.expiration = (int) exclusiveLockTtl.get(SECONDS); + return touchLockPolicy; + } + + @Override + public void shutdown(){ + shutdownAndAwaitTermination(scheduledExecutorService); + } + +} diff --git a/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/wal/AerospikeWriteAheadLogManager.java b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/wal/AerospikeWriteAheadLogManager.java new file mode 100644 index 0000000..d0394f5 --- /dev/null +++ b/aerospike-batch-updater/src/main/java/nosql/batch/update/aerospike/wal/AerospikeWriteAheadLogManager.java @@ -0,0 +1,206 @@ +package nosql.batch.update.aerospike.wal; + +import com.aerospike.client.AerospikeException; +import com.aerospike.client.Bin; +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.Key; +import com.aerospike.client.Record; +import com.aerospike.client.ResultCode; +import com.aerospike.client.Value; +import com.aerospike.client.policy.RecordExistsAction; +import com.aerospike.client.policy.WritePolicy; +import com.aerospike.client.query.Filter; +import com.aerospike.client.query.IndexType; +import com.aerospike.client.query.RecordSet; +import com.aerospike.client.query.Statement; +import nosql.batch.update.BatchUpdate; +import nosql.batch.update.aerospike.lock.AerospikeBatchLocks; +import nosql.batch.update.wal.WalRecord; +import nosql.batch.update.wal.WalTimeRange; +import nosql.batch.update.wal.WriteAheadLogManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.ByteBuffer; +import java.time.Clock; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; + +public class AerospikeWriteAheadLogManager, UPDATES, EV> + implements WriteAheadLogManager { + + private static final Logger logger = LoggerFactory.getLogger(AerospikeWriteAheadLogManager.class); + + private static final String UUID_BIN_NAME = "uuid"; + private static final String TIMESTAMP_BIN_NAME = "timestamp"; + + private final IAerospikeClient client; + private final String walNamespace; + private final String walSetName; + private final WritePolicy writePolicy; + private final WritePolicy deletePolicy; + private final AerospikeBatchUpdateSerde batchSerializer; + private final Clock clock; + + public AerospikeWriteAheadLogManager(IAerospikeClient client, + String walNamespace, String walSetName, + AerospikeBatchUpdateSerde batchSerializer, + Clock clock) { + this.client = client; + this.walNamespace = walNamespace; + this.walSetName = walSetName; + this.writePolicy = configureWritePolicy(client.getWritePolicyDefault()); + this.deletePolicy = this.writePolicy; + this.batchSerializer = batchSerializer; + this.clock = clock; + + createSecondaryIndexOnTimestamp(); + } + + private WritePolicy configureWritePolicy(WritePolicy writePolicyDefault){ + WritePolicy writePolicy = new WritePolicy(writePolicyDefault); + writePolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; + writePolicy.sendKey = true; + writePolicy.expiration = -1; + return writePolicy; + } + + @Override + public Value writeBatch(BatchUpdate batch) { + Value batchId = generateBatchId(); + + List batchBins = batchSerializer.write(batch); + List bins = 
new ArrayList<>(batchBins.size() + 1); + bins.addAll(batchBins); + bins.add(new Bin(UUID_BIN_NAME, batchId)); + bins.add(new Bin(TIMESTAMP_BIN_NAME, Value.get(clock.millis()))); + + try { + client.put(writePolicy, + new Key(walNamespace, walSetName, batchId), + bins.toArray(new Bin[0])); + return batchId; + } catch (AerospikeException ae){ + if(ae.getResultCode() == ResultCode.RECORD_TOO_BIG){ + logger.error("update data size to big: {}", batchBins.stream().mapToInt(bin -> bin.value.estimateSize()).sum()); + } + throw ae; + } + } + + public static Value generateBatchId() { + return Value.get(getBytesFromUUID(UUID.randomUUID())); + } + + @Override + public boolean deleteBatch(Value batchId) { + return client.delete(deletePolicy, new Key(walNamespace, walSetName, batchId)); + } + + @Override + public List getTimeRanges(Duration staleThreshold, int batchSize) { + Statement statement = staleBatchesStatement(staleThreshold, walNamespace, walSetName, clock); + RecordSet recordSet = client.query(null, statement); + + List timestamps = new ArrayList<>(); + recordSet.iterator().forEachRemaining(keyRecord -> timestamps.add(keyRecord.record.getLong(TIMESTAMP_BIN_NAME))); + Collections.sort(timestamps); + + return getTimeRangesForTimestamps(timestamps, batchSize); + } + + @Override + public List> getStaleBatchesForRange(WalTimeRange timeRange) { + Statement statement = staleBatchesStatement(walNamespace, walSetName, timeRange.getFromTimestamp(), timeRange.getToTimestamp()); + RecordSet recordSet = client.query(null, statement); + + List> staleTransactions = new ArrayList<>(); + recordSet.iterator().forEachRemaining(keyRecord -> { + Record record = keyRecord.record; + staleTransactions.add(new WalRecord<>( + Value.get(record.getValue(UUID_BIN_NAME)), + record.getLong(TIMESTAMP_BIN_NAME), + batchSerializer.read(record.bins))); + }); + Collections.sort(staleTransactions); + + return staleTransactions; + } + + public static Statement staleBatchesStatement(Duration staleThreshold, String walNamespace, String walSetName, Clock clock) { + Statement statement = new Statement(); + statement.setNamespace(walNamespace); + statement.setSetName(walSetName); + statement.setFilter(Filter.range(TIMESTAMP_BIN_NAME, + 0, Math.max(clock.millis() - staleThreshold.toMillis(), 0))); + return statement; + } + + public static Statement staleBatchesStatement(String walNamespace, String walSetName, long begin, long end) { + Statement statement = new Statement(); + statement.setNamespace(walNamespace); + statement.setSetName(walSetName); + statement.setFilter(Filter.range(TIMESTAMP_BIN_NAME, begin, end)); + return statement; + } + + public static List getTimeRangesForTimestamps(List timestamps, int batchSize) { + List walTimeRanges = new ArrayList<>(); + + int fromIdx = 0; + int size = timestamps.size(); + int toIdx = Math.min(batchSize, size) - 1; + + while (fromIdx < size) { + long fromTimestamp = timestamps.get(fromIdx); + long toTimestamp = timestamps.get(toIdx); + walTimeRanges.add(new WalTimeRange(fromTimestamp, toTimestamp)); + + fromIdx = toIdx; + while (fromIdx < size && timestamps.get(fromIdx) == toTimestamp) { + fromIdx++; + } + + toIdx = Math.min(fromIdx + batchSize, size) - 1; + } + + return walTimeRanges; + } + + static byte[] getBytesFromUUID(UUID uuid) { + ByteBuffer bb = ByteBuffer.wrap(new byte[16]); + bb.putLong(uuid.getMostSignificantBits()); + bb.putLong(uuid.getLeastSignificantBits()); + + return bb.array(); + } + + private void createSecondaryIndexOnTimestamp() { + try { + String indexName = walSetName + 
"_timestamp"; + client.createIndex(null, walNamespace, walSetName, indexName, TIMESTAMP_BIN_NAME, IndexType.NUMERIC) + .waitTillComplete(200, 0); + } catch (AerospikeException ae) { + if(ae.getResultCode() == ResultCode.INDEX_ALREADY_EXISTS){ + logger.info("Will not create WAL secondary index as it already exists"); + } else { + throw ae; + } + } + } + + public String getWalNamespace() { + return walNamespace; + } + + public String getWalSetName() { + return walSetName; + } + + public IAerospikeClient getClient() { + return client; + } +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/AerospikeTestUtils.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/AerospikeTestUtils.java new file mode 100644 index 0000000..48c75fc --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/AerospikeTestUtils.java @@ -0,0 +1,30 @@ +package nosql.batch.update.aerospike; + +import com.aerospike.AerospikeContainerUtils; +import com.aerospike.AerospikeProperties; +import com.aerospike.client.AerospikeClient; +import com.aerospike.client.async.EventLoops; +import com.aerospike.client.policy.ClientPolicy; +import org.testcontainers.containers.GenericContainer; + +public class AerospikeTestUtils { + + public static AerospikeProperties AEROSPIKE_PROPERTIES = new AerospikeProperties(); + + public static GenericContainer getAerospikeContainer() { + return AerospikeContainerUtils.startAerospikeContainer(AEROSPIKE_PROPERTIES); + } + + public static AerospikeClient getAerospikeClient(GenericContainer aerospike) { + return getAerospikeClient(aerospike, null); + } + + public static AerospikeClient getAerospikeClient(GenericContainer aerospike, EventLoops eventLoops) { + ClientPolicy clientPolicy = new ClientPolicy(); + clientPolicy.eventLoops = eventLoops; + clientPolicy.writePolicyDefault.durableDelete = true; + return new AerospikeClient(clientPolicy, aerospike.getContainerIpAddress(), + aerospike.getMappedPort(AEROSPIKE_PROPERTIES.getPort())); + } + +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/AerospikeBasicFailingUpdateOperations.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/AerospikeBasicFailingUpdateOperations.java new file mode 100644 index 0000000..55d8ac5 --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/AerospikeBasicFailingUpdateOperations.java @@ -0,0 +1,31 @@ +package nosql.batch.update.aerospike.basic; + +import nosql.batch.update.FailingUpdateOperations; +import nosql.batch.update.UpdateOperations; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static nosql.batch.update.util.HangingUtil.selectFlaking; + +public class AerospikeBasicFailingUpdateOperations extends FailingUpdateOperations> { + + private static Logger logger = LoggerFactory.getLogger(AerospikeBasicFailingUpdateOperations.class); + + public AerospikeBasicFailingUpdateOperations(UpdateOperations> updateOperations, AtomicBoolean failsUpdate) { + super(updateOperations, failsUpdate); + } + + public static AerospikeBasicFailingUpdateOperations failingUpdates( + UpdateOperations> updateOperations, AtomicBoolean failsUpdate){ + return new AerospikeBasicFailingUpdateOperations(updateOperations, failsUpdate); + } + + @Override + protected List selectFlakingToUpdate(List records) { + return selectFlaking(records, + key -> logger.info("batch update 
failed flaking for key [{}]", key)); + } +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/AerospikeBasicHangingUpdateOperations.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/AerospikeBasicHangingUpdateOperations.java new file mode 100644 index 0000000..947eb3d --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/AerospikeBasicHangingUpdateOperations.java @@ -0,0 +1,31 @@ +package nosql.batch.update.aerospike.basic; + +import nosql.batch.update.HangingUpdateOperations; +import nosql.batch.update.UpdateOperations; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static nosql.batch.update.util.HangingUtil.selectFlaking; + +public class AerospikeBasicHangingUpdateOperations extends HangingUpdateOperations> { + + private static final Logger logger = LoggerFactory.getLogger(AerospikeBasicHangingUpdateOperations.class); + + public AerospikeBasicHangingUpdateOperations(UpdateOperations> updateOperations, AtomicBoolean failsUpdate) { + super(updateOperations, failsUpdate); + } + + public static AerospikeBasicHangingUpdateOperations hangingUpdates( + UpdateOperations> updateOperations, AtomicBoolean failsUpdate){ + return new AerospikeBasicHangingUpdateOperations(updateOperations, failsUpdate); + } + + @Override + protected List selectFlakingToUpdate(List records) { + return selectFlaking(records, + key -> logger.info("batch update failed flaking for key [{}]", key)); + } +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/BasicBatchRetentionTest.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/BasicBatchRetentionTest.java new file mode 100644 index 0000000..aa8c112 --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/BasicBatchRetentionTest.java @@ -0,0 +1,94 @@ +package nosql.batch.update.aerospike.basic; + +import com.aerospike.client.AerospikeClient; +import com.aerospike.client.Key; +import com.aerospike.client.Value; +import com.aerospike.client.async.NioEventLoops; +import nosql.batch.update.BatchOperations; +import nosql.batch.update.BatchRetentionTest; +import nosql.batch.update.BatchUpdater; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.util.FixedClock; +import nosql.batch.update.wal.CompletionStatistic; +import nosql.batch.update.wal.WriteAheadLogCompleter; +import org.testcontainers.containers.GenericContainer; + +import java.time.Duration; +import java.util.List; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicInteger; + +import static nosql.batch.update.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES; +import static nosql.batch.update.aerospike.AerospikeTestUtils.getAerospikeClient; +import static nosql.batch.update.aerospike.AerospikeTestUtils.getAerospikeContainer; +import static nosql.batch.update.aerospike.basic.BasicConsistencyTest.getValue; +import static nosql.batch.update.aerospike.basic.BasicConsistencyTest.incrementBoth; +import static nosql.batch.update.aerospike.basic.util.BasicFailingOperationsUtil.failingOperations; +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; +import static org.awaitility.Duration.ONE_SECOND; + +public class BasicBatchRetentionTest 
extends BatchRetentionTest { + + static final GenericContainer aerospike = getAerospikeContainer(); + + static final AerospikeClient client = getAerospikeClient(aerospike, new NioEventLoops()); + + static final FixedClock clock = new FixedClock(); + + static BatchOperations, AerospikeLock, Value> operations + = failingOperations(client, clock, Executors.newCachedThreadPool(), + failsAcquireLock, failsCheckValue, failsMutate, failsReleaseLock, failsDeleteBatch, deletesInProcess); + + static BatchUpdater, AerospikeLock, Value> updater + = new BatchUpdater<>(operations); + + public static final Duration STALE_BATCHES_THRESHOLD = Duration.ofSeconds(1); + public static final int BATCH_SIZE = 100; + + static WriteAheadLogCompleter, AerospikeLock, Value> walCompleter + = new WriteAheadLogCompleter<>( + operations, STALE_BATCHES_THRESHOLD, BATCH_SIZE, + new BasicRecoveryTest.DummyExclusiveLocker(), + Executors.newScheduledThreadPool(1)); + + static AtomicInteger keyCounter = new AtomicInteger(); + + private Key key1; + private Key key2; + + @Override + protected void runUpdate() { + for(int i = 0; i < 10; i++){ + incrementBoth(key1, key2, updater, client); + } + } + + @Override + protected void checkForConsistency() { + assertThat(getValue(key1, client)).isEqualTo(getValue(key2, client)); + + await().timeout(ONE_SECOND).untilAsserted(() -> + assertThat(operations.getWriteAheadLogManager().getTimeRanges(STALE_BATCHES_THRESHOLD, BATCH_SIZE)).isEmpty()); + } + + private int setNameCounter = 0; + + @Override + protected void cleanUp() { + String setName = String.valueOf(setNameCounter++); + key1 = new Key(AEROSPIKE_PROPERTIES.getNamespace(), setName, keyCounter.incrementAndGet()); + key2 = new Key(AEROSPIKE_PROPERTIES.getNamespace(), setName, keyCounter.incrementAndGet()); + + clock.setTime(0); + } + + @Override + protected CompletionStatistic runCompleter() { + clock.setTime(STALE_BATCHES_THRESHOLD.toMillis() + 1); + return walCompleter.completeHangedTransactions(); + } + + +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/BasicConsistencyTest.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/BasicConsistencyTest.java new file mode 100644 index 0000000..6071aaa --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/BasicConsistencyTest.java @@ -0,0 +1,127 @@ +package nosql.batch.update.aerospike.basic; + +import com.aerospike.client.AerospikeClient; +import com.aerospike.client.Bin; +import com.aerospike.client.Key; +import com.aerospike.client.Value; +import com.aerospike.client.async.NioEventLoops; +import nosql.batch.update.BatchOperations; +import nosql.batch.update.BatchUpdater; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.lock.LockingException; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.GenericContainer; + +import java.time.Clock; +import java.util.List; +import java.util.Random; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicInteger; + +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static nosql.batch.update.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES; +import static 
nosql.batch.update.aerospike.AerospikeTestUtils.getAerospikeClient; +import static nosql.batch.update.aerospike.AerospikeTestUtils.getAerospikeContainer; +import static nosql.batch.update.aerospike.basic.AerospikeBasicBatchUpdater.basicOperations; +import static org.assertj.core.api.Assertions.assertThat; + +public class BasicConsistencyTest { + + private static final Logger logger = LoggerFactory.getLogger(BasicConsistencyTest.class); + + private static final GenericContainer aerospike = getAerospikeContainer(); + + private static final AerospikeClient client = getAerospikeClient(aerospike, new NioEventLoops()); + + private static BatchOperations, AerospikeLock, Value> operations = basicOperations( + client, + AEROSPIKE_PROPERTIES.getNamespace(), "wal", + Clock.systemUTC(), + Executors.newCachedThreadPool(), + Executors.newCachedThreadPool()); + + private static BatchUpdater, AerospikeLock, Value> updater = new BatchUpdater<>(operations); + + private static String setName = String.valueOf(BasicConsistencyTest.class.hashCode()); + private static AtomicInteger keyCounter = new AtomicInteger(); + private static String BIN_NAME = "value"; + + private AtomicInteger exceptionsCount = new AtomicInteger(); + private Random random = new Random(); + private Key key1 = new Key(AEROSPIKE_PROPERTIES.getNamespace(), setName, keyCounter.incrementAndGet()); + private Key key2 = new Key(AEROSPIKE_PROPERTIES.getNamespace(), setName, keyCounter.incrementAndGet()); + + @Test + public void shouldUpdate() { + update(key1, key2); + + assertThat((Long)client.get(null, key1).getValue(BIN_NAME)).isEqualTo(1000); + assertThat((Long)client.get(null, key2).getValue(BIN_NAME)).isEqualTo(1000); + } + + @Test + public void shouldUpdateConcurrently() throws ExecutionException, InterruptedException { + ExecutorService executorService = Executors.newFixedThreadPool(2); + Future future1 = executorService.submit(() -> update(key1, key2)); + Future future2 = executorService.submit(() -> update(key1, key2)); + + future1.get(); + future2.get(); + + assertThat((Long)client.get(null, key1).getValue(BIN_NAME)).isEqualTo(2000); + assertThat((Long)client.get(null, key2).getValue(BIN_NAME)).isEqualTo(2000); + assertThat(exceptionsCount.get()).isGreaterThan(0); + } + + private void update(Key key1, Key key2){ + for(int i = 0; i < 1000; i++){ + try { + incrementBoth(key1, key2, updater, client); + } catch (LockingException e) { + exceptionsCount.incrementAndGet(); + i--; + try { + Thread.sleep(random.nextInt(25)); + } catch (InterruptedException e1) { + throw new RuntimeException(e1); + } + + logger.debug(e.getMessage()); + } + } + } + + public static void incrementBoth(Key key1, Key key2, + BatchUpdater, AerospikeLock, Value> updater, + AerospikeClient aerospikeClient) { + Long value1 = (Long)getValue(key1, aerospikeClient); + Long value2 = (Long)getValue(key2, aerospikeClient); + + long value1New = (value1 != null ? value1 : 0) + 1; + long value2New = (value2 != null ? 
value2 : 0) + 1; + updater.update(new AerospikeBasicBatchUpdate( + new AerospikeBasicBatchLocks(asList( + record(key1, value1), + record(key2, value2))), + asList( + record(key1, value1New), + record(key2, value2New)))); + logger.debug("updated {} from {} to {} and {} from {} to {}", key1, value1, value1New, key2, value2, value2New); + } + + public static Record record(Key key, Long value) { + return new Record(key, singletonList(new Bin(BIN_NAME, value))); + } + + public static Object getValue(Key key, AerospikeClient client){ + com.aerospike.client.Record record1 = client.get(null, key); + return record1 != null ? (Long)record1.getValue(BIN_NAME) : null; + } +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/BasicRecoveryTest.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/BasicRecoveryTest.java new file mode 100644 index 0000000..a88a1ac --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/BasicRecoveryTest.java @@ -0,0 +1,107 @@ +package nosql.batch.update.aerospike.basic; + +import com.aerospike.client.AerospikeClient; +import com.aerospike.client.Key; +import com.aerospike.client.Value; +import com.aerospike.client.async.NioEventLoops; +import nosql.batch.update.BatchOperations; +import nosql.batch.update.BatchUpdater; +import nosql.batch.update.RecoveryTest; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.util.FixedClock; +import nosql.batch.update.wal.CompletionStatistic; +import nosql.batch.update.wal.ExclusiveLocker; +import nosql.batch.update.wal.WriteAheadLogCompleter; +import org.testcontainers.containers.GenericContainer; + +import java.time.Duration; +import java.util.List; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicInteger; + +import static nosql.batch.update.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES; +import static nosql.batch.update.aerospike.AerospikeTestUtils.getAerospikeClient; +import static nosql.batch.update.aerospike.AerospikeTestUtils.getAerospikeContainer; +import static nosql.batch.update.aerospike.basic.BasicConsistencyTest.getValue; +import static nosql.batch.update.aerospike.basic.BasicConsistencyTest.incrementBoth; +import static nosql.batch.update.aerospike.basic.util.BasicHangingOperationsUtil.hangingOperations; +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; +import static org.awaitility.Duration.ONE_SECOND; + +public class BasicRecoveryTest extends RecoveryTest { + + static final GenericContainer aerospike = getAerospikeContainer(); + + static final NioEventLoops eventLoops = new NioEventLoops(); + static final AerospikeClient client = getAerospikeClient(aerospike, eventLoops); + + static final FixedClock clock = new FixedClock(); + + static BatchOperations, AerospikeLock, Value> operations + = hangingOperations(client, Executors.newCachedThreadPool(), clock, + hangsAcquire, hangsUpdate, hangsRelease, hangsDeleteBatchInWal); + + static BatchUpdater, AerospikeLock, Value> updater + = new BatchUpdater<>(operations); + + public static final Duration STALE_BATCHES_THRESHOLD = Duration.ofSeconds(1); + public static final int BATCH_SIZE = 100; + + static WriteAheadLogCompleter, AerospikeLock, Value> walCompleter + = new WriteAheadLogCompleter<>( + operations, STALE_BATCHES_THRESHOLD, BATCH_SIZE, + new DummyExclusiveLocker(), + 
Executors.newScheduledThreadPool(1)); + + static AtomicInteger keyCounter = new AtomicInteger(); + private Key key1; + private Key key2; + + @Override + protected void runUpdate() { + for(int i = 0; i < 10; i++){ + incrementBoth(key1, key2, updater, client); + } + } + + @Override + protected CompletionStatistic runCompleter(){ + clock.setTime(STALE_BATCHES_THRESHOLD.toMillis() + 1); + return walCompleter.completeHangedTransactions(); + } + + @Override + protected void checkForConsistency() { + assertThat(getValue(key1, client)).isEqualTo(getValue(key2, client)); + + await().timeout(ONE_SECOND).untilAsserted(() -> + assertThat(operations.getWriteAheadLogManager().getTimeRanges(STALE_BATCHES_THRESHOLD, BATCH_SIZE)).isEmpty()); + } + + private int setNameCounter = 0; + + @Override + protected void cleanUp() { + String setName = String.valueOf(setNameCounter++); + key1 = new Key(AEROSPIKE_PROPERTIES.getNamespace(), setName, keyCounter.incrementAndGet()); + key2 = new Key(AEROSPIKE_PROPERTIES.getNamespace(), setName, keyCounter.incrementAndGet()); + + clock.setTime(0); + } + + static class DummyExclusiveLocker implements ExclusiveLocker{ + + @Override + public boolean acquire() { + return true; + } + + @Override + public void release() {} + + @Override + public void shutdown() {} + } +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/lock/AerospikeBasicFailingLockOperations.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/lock/AerospikeBasicFailingLockOperations.java new file mode 100644 index 0000000..8969679 --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/lock/AerospikeBasicFailingLockOperations.java @@ -0,0 +1,88 @@ +package nosql.batch.update.aerospike.basic.lock; + +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.Value; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.lock.AerospikeExpectedValuesOperations; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.aerospike.lock.AerospikeLockOperations; +import nosql.batch.update.lock.PermanentLockingException; +import nosql.batch.update.lock.TemporaryLockingException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static nosql.batch.update.util.HangingUtil.selectFlaking; + + +public class AerospikeBasicFailingLockOperations + extends AerospikeLockOperations> { + + private static final Logger logger = LoggerFactory.getLogger(AerospikeBasicFailingLockOperations.class); + + private final AtomicReference failsAcquire; + private final AtomicReference failsCheckValue; + private final AtomicBoolean failsRelease; + + public AerospikeBasicFailingLockOperations(IAerospikeClient reactorClient, + ExecutorService aerospikeExecutor, + AerospikeExpectedValuesOperations> expectedValuesOperations, + AtomicReference failsAcquire, + AtomicReference failsCheckValue, + AtomicBoolean failsRelease) { + super(reactorClient, expectedValuesOperations, aerospikeExecutor); + this.failsAcquire = failsAcquire; + this.failsCheckValue = failsCheckValue; + this.failsRelease = failsRelease; + } + + @Override + protected List putLocks( + Value batchId, + AerospikeBasicBatchLocks batchLocks, + boolean checkTransactionId) throws TemporaryLockingException { + Throwable throwable 
= failsAcquire.get(); + if(throwable != null){ + List recordsSelected = selectFlaking(batchLocks.expectedValues(), + key -> logger.info("acquire locks failed flaking for key [{}]", key)); + + super.putLocks(batchId, + new AerospikeBasicBatchLocks(recordsSelected), + checkTransactionId); + throw throwable instanceof TemporaryLockingException + ? (TemporaryLockingException) throwable + : new RuntimeException(throwable); + } else { + return super.putLocks(batchId, batchLocks, checkTransactionId); + } + } + + @Override + protected void checkExpectedValues(AerospikeBasicBatchLocks batchLocks, List keysLocked) throws PermanentLockingException { + Throwable throwable = failsCheckValue.get(); + if(throwable != null){ + throw throwable instanceof PermanentLockingException + ? (PermanentLockingException) throwable + : new RuntimeException(throwable); + } else { + super.checkExpectedValues(batchLocks, keysLocked); + } + } + + @Override + public void release(List locks, Value batchId) { + if(failsRelease.get()){ + List partialLocks = selectFlaking(locks, + key -> logger.info("release locks failed flaking for key [{}]", key)); + super.release(partialLocks, batchId); + throw new RuntimeException(); + } else { + super.release(locks, batchId); + } + } + +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/lock/AerospikeBasicHangingLockOperations.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/lock/AerospikeBasicHangingLockOperations.java new file mode 100644 index 0000000..f72d70b --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/lock/AerospikeBasicHangingLockOperations.java @@ -0,0 +1,47 @@ +package nosql.batch.update.aerospike.basic.lock; + +import com.aerospike.client.Value; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.lock.HangingLockOperations; +import nosql.batch.update.lock.LockOperations; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static nosql.batch.update.util.HangingUtil.selectFlaking; + + +public class AerospikeBasicHangingLockOperations + extends HangingLockOperations { + + private static final Logger logger = LoggerFactory.getLogger(AerospikeBasicHangingLockOperations.class); + + private AerospikeBasicHangingLockOperations(LockOperations lockOperations, + AtomicBoolean failsAcquire, AtomicBoolean failsRelease) { + super(lockOperations, failsAcquire, failsRelease); + } + + public static AerospikeBasicHangingLockOperations hangingLocks(LockOperations lockOperations, + AtomicBoolean failsAcquire, AtomicBoolean failsRelease){ + return new AerospikeBasicHangingLockOperations(lockOperations, failsAcquire, failsRelease); + } + + @Override + protected AerospikeBasicBatchLocks selectFlakingToAcquire(AerospikeBasicBatchLocks aerospikeBasicBatchLocks) { + List recordsSelected = selectFlaking(aerospikeBasicBatchLocks.expectedValues(), + key -> logger.info("acquire locks failed flaking for key [{}]", key)); + + return new AerospikeBasicBatchLocks(recordsSelected); + } + + @Override + protected List selectFlakingToRelease(List locks) { + return selectFlaking(locks, + key -> logger.info("release locks failed flaking for key [{}]", key)); + } + + +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/lock/AerospikeBasicLockOperationsTest.java 
b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/lock/AerospikeBasicLockOperationsTest.java new file mode 100644 index 0000000..c050939 --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/lock/AerospikeBasicLockOperationsTest.java @@ -0,0 +1,65 @@ +package nosql.batch.update.aerospike.basic.lock; + +import com.aerospike.client.AerospikeClient; +import com.aerospike.client.Key; +import com.aerospike.client.Value; +import com.aerospike.client.async.NioEventLoops; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.aerospike.wal.AerospikeWriteAheadLogManager; +import nosql.batch.update.lock.LockOperationsTest; +import org.jetbrains.annotations.NotNull; +import org.testcontainers.containers.GenericContainer; + +import java.util.List; +import java.util.Set; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import static java.util.Arrays.asList; +import static nosql.batch.update.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES; +import static nosql.batch.update.aerospike.AerospikeTestUtils.getAerospikeClient; +import static nosql.batch.update.aerospike.AerospikeTestUtils.getAerospikeContainer; +import static nosql.batch.update.aerospike.basic.AerospikeBasicBatchUpdater.basicLockOperations; +import static nosql.batch.update.aerospike.basic.BasicConsistencyTest.record; +import static org.assertj.core.api.Assertions.assertThat; + +public class AerospikeBasicLockOperationsTest + extends LockOperationsTest { + + static final GenericContainer aerospike = getAerospikeContainer(); + + static final NioEventLoops eventLoops = new NioEventLoops(); + static final AerospikeClient client = getAerospikeClient(aerospike, eventLoops); + + static String setName = String.valueOf(AerospikeBasicLockOperationsTest.class.hashCode()); + static AtomicInteger keyCounter = new AtomicInteger(); + private Key key1 = new Key(AEROSPIKE_PROPERTIES.getNamespace(), setName, keyCounter.incrementAndGet()); + private Key key2 = new Key(AEROSPIKE_PROPERTIES.getNamespace(), setName, keyCounter.incrementAndGet()); + + AerospikeBasicBatchLocks locks1 = new AerospikeBasicBatchLocks(asList(record(key1, null), record(key2, null))); + + public AerospikeBasicLockOperationsTest() { + super(basicLockOperations(client, Executors.newCachedThreadPool())); + } + + @Override + protected AerospikeBasicBatchLocks getLocks1() { + return locks1; + } + + @Override + protected Value generateBatchId() { + return AerospikeWriteAheadLogManager.generateBatchId(); + } + + @Override + protected void assertThatSameLockKeys(List locks1, List locks2) { + assertThat(toKeys(locks1)).containsExactlyInAnyOrderElementsOf(toKeys(locks2)); + } + + @NotNull + private Set toKeys(List locks1) { + return locks1.stream().map(l -> l.key).collect(Collectors.toSet()); + } +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/util/BasicFailingOperationsUtil.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/util/BasicFailingOperationsUtil.java new file mode 100644 index 0000000..18d0bbc --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/util/BasicFailingOperationsUtil.java @@ -0,0 +1,56 @@ +package nosql.batch.update.aerospike.basic.util; + +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.Value; +import nosql.batch.update.BatchOperations; +import 
nosql.batch.update.UpdateOperations; +import nosql.batch.update.aerospike.basic.AerospikeBasicExpectedValueOperations; +import nosql.batch.update.aerospike.basic.AerospikeBasicUpdateOperations; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicFailingLockOperations; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.lock.LockOperations; +import nosql.batch.update.wal.WriteAheadLogManager; + +import java.time.Clock; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static nosql.batch.update.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES; +import static nosql.batch.update.aerospike.basic.AerospikeBasicBatchUpdater.basicWalManager; +import static nosql.batch.update.aerospike.basic.AerospikeBasicFailingUpdateOperations.failingUpdates; +import static nosql.batch.update.aerospike.wal.AerospikeFailingWriteAheadLogManager.failingWal; + +public class BasicFailingOperationsUtil { + + public static BatchOperations, AerospikeLock, Value> failingOperations( + IAerospikeClient client, + Clock clock, + ExecutorService executorService, + AtomicReference failsAcquire, + AtomicReference failsCheckValue, + AtomicBoolean failsUpdate, + AtomicBoolean failsRelease, + AtomicBoolean failsDeleteWal, + AtomicInteger deletesInProcess){ + + LockOperations lockOperations + = new AerospikeBasicFailingLockOperations(client, executorService, + new AerospikeBasicExpectedValueOperations(client), + failsAcquire, failsCheckValue, failsRelease); + + UpdateOperations> updateOperations = + failingUpdates(new AerospikeBasicUpdateOperations(client, executorService), failsUpdate); + + WriteAheadLogManager, Value> walManager + = failingWal(basicWalManager(client, AEROSPIKE_PROPERTIES.getNamespace(), "wal", clock), + failsDeleteWal, deletesInProcess); + + return new BatchOperations<>(walManager, lockOperations, updateOperations, executorService); + } + +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/util/BasicHangingOperationsUtil.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/util/BasicHangingOperationsUtil.java new file mode 100644 index 0000000..785290b --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/util/BasicHangingOperationsUtil.java @@ -0,0 +1,51 @@ +package nosql.batch.update.aerospike.basic.util; + +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.Value; +import nosql.batch.update.BatchOperations; +import nosql.batch.update.UpdateOperations; +import nosql.batch.update.aerospike.basic.AerospikeBasicUpdateOperations; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.lock.LockOperations; +import nosql.batch.update.wal.WriteAheadLogManager; + +import java.time.Clock; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; + +import static nosql.batch.update.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES; +import static nosql.batch.update.aerospike.basic.AerospikeBasicBatchUpdater.basicLockOperations; +import 
static nosql.batch.update.aerospike.basic.AerospikeBasicBatchUpdater.basicWalManager; +import static nosql.batch.update.aerospike.basic.AerospikeBasicHangingUpdateOperations.hangingUpdates; +import static nosql.batch.update.aerospike.basic.lock.AerospikeBasicHangingLockOperations.hangingLocks; +import static nosql.batch.update.aerospike.wal.AerospikeHangingWriteAheadLogManager.hangingWal; + +public class BasicHangingOperationsUtil { + + public static BatchOperations, AerospikeLock, Value> hangingOperations( + IAerospikeClient client, + ExecutorService executorService, + Clock clock, + AtomicBoolean hangsAcquire, + AtomicBoolean hangsUpdate, + AtomicBoolean hangsRelease, + AtomicBoolean hangsDeleteWal){ + + LockOperations lockOperations + = hangingLocks(basicLockOperations(client, executorService), + hangsAcquire, hangsRelease); + + UpdateOperations> updateOperations = + hangingUpdates(new AerospikeBasicUpdateOperations(client, executorService), hangsUpdate); + + WriteAheadLogManager, Value> walManager + = hangingWal(basicWalManager(client, AEROSPIKE_PROPERTIES.getNamespace(), "wal", clock), + hangsDeleteWal); + + return new BatchOperations<>(walManager, lockOperations, updateOperations, executorService); + } + +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/util/BasicOperationsUtil.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/util/BasicOperationsUtil.java new file mode 100644 index 0000000..b56d0b4 --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/basic/util/BasicOperationsUtil.java @@ -0,0 +1,32 @@ +package nosql.batch.update.aerospike.basic.util; + +import com.aerospike.client.AerospikeClient; +import com.aerospike.client.Value; +import nosql.batch.update.BatchOperations; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; + +import java.time.Clock; +import java.util.List; +import java.util.concurrent.ExecutorService; + +import static nosql.batch.update.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES; +import static nosql.batch.update.aerospike.basic.AerospikeBasicBatchUpdater.basicOperations; + +public class BasicOperationsUtil { + + public static BatchOperations, AerospikeLock, Value> getBasicOperations( + AerospikeClient client, + Clock clock, + ExecutorService aerospikeExecutorService, + ExecutorService batchExecutorService) { + return basicOperations( + client, + AEROSPIKE_PROPERTIES.getNamespace(), "wal", + clock, + aerospikeExecutorService, + batchExecutorService); + } + +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/lock/AerospikeLockOperationsTest.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/lock/AerospikeLockOperationsTest.java new file mode 100644 index 0000000..41c6a9d --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/lock/AerospikeLockOperationsTest.java @@ -0,0 +1,63 @@ +package nosql.batch.update.aerospike.lock; + + +import com.aerospike.client.Key; +import nosql.batch.update.aerospike.lock.AerospikeLockOperations.LockResult; +import nosql.batch.update.lock.Lock; +import nosql.batch.update.lock.PermanentLockingException; +import nosql.batch.update.lock.TemporaryLockingException; +import org.junit.Test; + +import java.net.SocketTimeoutException; +import java.util.Arrays; +import java.util.List; +import 
java.util.concurrent.CompletableFuture;
+
+import static java.util.concurrent.CompletableFuture.completedFuture;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class AerospikeLockOperationsTest {
+
+    @Test
+    public void shouldSuccess(){
+
+        Key key1 = new Key("ns", "set", "1");
+        Key key2 = new Key("ns", "set", "2");
+
+        List<CompletableFuture<LockResult<AerospikeLock>>> lockResults = Arrays.asList(
+                completedFuture(new LockResult<>(new AerospikeLock(Lock.LockType.LOCKED, key1))),
+                completedFuture(new LockResult<>(new AerospikeLock(Lock.LockType.SAME_BATCH, key2))));
+
+        List<AerospikeLock> locked = AerospikeLockOperations.processResults(lockResults);
+        assertThat(locked).containsExactly(
+                new AerospikeLock(Lock.LockType.LOCKED, key1),
+                new AerospikeLock(Lock.LockType.SAME_BATCH, key2));
+    }
+
+    @Test(expected = TemporaryLockingException.class)
+    public void shouldFail(){
+
+        Key keyLocked = new Key("ns", "set", "1");
+
+        List<CompletableFuture<LockResult<AerospikeLock>>> lockResults = Arrays.asList(
+                completedFuture(new LockResult<>(new AerospikeLock(Lock.LockType.LOCKED, keyLocked))),
+                completedFuture(new LockResult<>(new TemporaryLockingException("test"))));
+
+        AerospikeLockOperations.processResults(lockResults);
+    }
+
+    @Test(expected = RuntimeException.class)
+    public void shouldSelectNonLockingError(){
+
+        Key keyLocked = new Key("ns", "set", "1");
+
+        List<CompletableFuture<LockResult<AerospikeLock>>> lockResults = Arrays.asList(
+                completedFuture(new LockResult<>(new AerospikeLock(Lock.LockType.LOCKED, keyLocked))),
+                completedFuture(new LockResult<>(new TemporaryLockingException("test"))),
+                completedFuture(new LockResult<>(new SocketTimeoutException("test")))));
+
+        AerospikeLockOperations.processResults(lockResults);
+    }
+
+
+}
diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/wal/AerospikeExclusiveLockerTest.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/wal/AerospikeExclusiveLockerTest.java
new file mode 100644
index 0000000..4402a4d
--- /dev/null
+++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/wal/AerospikeExclusiveLockerTest.java
@@ -0,0 +1,52 @@
+package nosql.batch.update.aerospike.wal;
+
+import com.aerospike.client.AerospikeClient;
+import nosql.batch.update.wal.ExclusiveLocker;
+import nosql.batch.update.wal.ExclusiveLockerTest;
+import org.junit.Test;
+import org.testcontainers.containers.GenericContainer;
+
+import java.time.Duration;
+import java.util.concurrent.Executors;
+
+import static nosql.batch.update.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES;
+import static nosql.batch.update.aerospike.AerospikeTestUtils.getAerospikeClient;
+import static nosql.batch.update.aerospike.AerospikeTestUtils.getAerospikeContainer;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class AerospikeExclusiveLockerTest extends ExclusiveLockerTest {
+
+    static final GenericContainer aerospike = getAerospikeContainer();
+
+    static final AerospikeClient client = getAerospikeClient(aerospike);
+
+    @Override
+    public ExclusiveLocker getExclusiveLocker(){
+        return new AerospikeExclusiveLocker(client, AEROSPIKE_PROPERTIES.getNamespace(), "test");
+    }
+
+    @Test
+    public void shouldUpgradeLock() throws InterruptedException {
+        ExclusiveLocker exclusiveLocker = getExclusiveLocker(Duration.ofSeconds(2));
+
+        ExclusiveLocker exclusiveLocker2 = getExclusiveLocker(Duration.ofSeconds(2));
+
+        assertThat(exclusiveLocker.acquire()).isTrue();
+
+        Thread.sleep(2500);
+
+        assertThat(exclusiveLocker2.acquire()).isFalse();
+        assertThat(exclusiveLocker.acquire()).isTrue();
+
+        exclusiveLocker.release();
+
exclusiveLocker2.release(); + + exclusiveLocker.shutdown(); + exclusiveLocker2.shutdown(); + } + + public ExclusiveLocker getExclusiveLocker(Duration exclusiveLockTtl){ + return new AerospikeExclusiveLocker(client, AEROSPIKE_PROPERTIES.getNamespace(), "test", + Executors.newSingleThreadScheduledExecutor(), exclusiveLockTtl); + } +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/wal/AerospikeFailingWriteAheadLogManager.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/wal/AerospikeFailingWriteAheadLogManager.java new file mode 100644 index 0000000..93013ab --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/wal/AerospikeFailingWriteAheadLogManager.java @@ -0,0 +1,25 @@ +package nosql.batch.update.aerospike.wal; + +import com.aerospike.client.Value; +import nosql.batch.update.aerospike.lock.AerospikeBatchLocks; +import nosql.batch.update.wal.FailingWriteAheadLogManager; +import nosql.batch.update.wal.WriteAheadLogManager; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +public class AerospikeFailingWriteAheadLogManager, UPDATES, EV> + extends FailingWriteAheadLogManager { + + public AerospikeFailingWriteAheadLogManager(WriteAheadLogManager writeAheadLogManager, + AtomicBoolean failsDelete, AtomicInteger deletesInProcess) { + super(writeAheadLogManager, failsDelete, deletesInProcess); + } + + public static , UPDATES, EV> + AerospikeFailingWriteAheadLogManager failingWal( + AerospikeWriteAheadLogManager writeAheadLogManager, + AtomicBoolean failsDelete, AtomicInteger deletesInProcess){ + return new AerospikeFailingWriteAheadLogManager<>(writeAheadLogManager, failsDelete, deletesInProcess); + } +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/wal/AerospikeHangingWriteAheadLogManager.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/wal/AerospikeHangingWriteAheadLogManager.java new file mode 100644 index 0000000..63ce7bd --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/wal/AerospikeHangingWriteAheadLogManager.java @@ -0,0 +1,22 @@ +package nosql.batch.update.aerospike.wal; + +import com.aerospike.client.Value; +import nosql.batch.update.aerospike.lock.AerospikeBatchLocks; +import nosql.batch.update.wal.HangingWriteAheadLogManager; +import nosql.batch.update.wal.WriteAheadLogManager; + +import java.util.concurrent.atomic.AtomicBoolean; + +public class AerospikeHangingWriteAheadLogManager, UPDATES, EV> + extends HangingWriteAheadLogManager { + + public AerospikeHangingWriteAheadLogManager(WriteAheadLogManager writeAheadLogManager, AtomicBoolean failsDelete) { + super(writeAheadLogManager, failsDelete); + } + + public static , UPDATES, EV> + AerospikeHangingWriteAheadLogManager hangingWal( + AerospikeWriteAheadLogManager writeAheadLogManager, AtomicBoolean failsDelete){ + return new AerospikeHangingWriteAheadLogManager<>(writeAheadLogManager, failsDelete); + } +} diff --git a/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/wal/AerospikeWriteAheadLogManagerTest.java b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/wal/AerospikeWriteAheadLogManagerTest.java new file mode 100644 index 0000000..0deb7cf --- /dev/null +++ b/aerospike-batch-updater/src/test/java/nosql/batch/update/aerospike/wal/AerospikeWriteAheadLogManagerTest.java @@ -0,0 +1,92 @@ +package nosql.batch.update.aerospike.wal; + +import 
com.aerospike.client.AerospikeClient; +import com.aerospike.client.Bin; +import com.aerospike.client.Value; +import com.aerospike.client.async.NioEventLoops; +import nosql.batch.update.BatchUpdate; +import nosql.batch.update.aerospike.lock.AerospikeBatchLocks; +import nosql.batch.update.util.FixedClock; +import nosql.batch.update.wal.WriteAheadLogManagerTest; +import org.testcontainers.containers.GenericContainer; + +import java.time.Duration; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static java.util.Collections.emptyList; +import static nosql.batch.update.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES; +import static nosql.batch.update.aerospike.AerospikeTestUtils.getAerospikeClient; +import static nosql.batch.update.aerospike.AerospikeTestUtils.getAerospikeContainer; + +public class AerospikeWriteAheadLogManagerTest extends WriteAheadLogManagerTest { + + static final GenericContainer aerospike = getAerospikeContainer(); + + static final NioEventLoops eventLoops = new NioEventLoops(); + static final AerospikeClient client = getAerospikeClient(aerospike, eventLoops); + + static final FixedClock clock = new FixedClock(); + static { + clock.setTime(1000); + } + static final Duration staleThreshold = Duration.ofMillis(100); + static final int batchSize = 100; + + static String walSetName = String.valueOf(AerospikeWriteAheadLogManagerTest.class.hashCode()); + + private static AerospikeWriteAheadLogManager, Object, Object> writeAheadLogManager + = new AerospikeWriteAheadLogManager<>( + client, AEROSPIKE_PROPERTIES.getNamespace(), walSetName, + new AerospikeBatchUpdateSerde, Object, Object>(){ + @Override + public List write(BatchUpdate batch) { + return emptyList(); + } + @Override + public BatchUpdate read(Map bins) { + return null; + } + }, + clock); + + + + @Override + protected Value saveBatch() { + return writeAheadLogManager.writeBatch( + new BatchUpdate, Object>() { + @Override + public AerospikeBatchLocks locks() { + return null; + } + + @Override + public Object updates() { + return null; + } + }); + } + + @Override + protected boolean removeBatch(Value batchId) { + return writeAheadLogManager.deleteBatch(batchId); + } + + @Override + protected void switchClockAhead() { + clock.setTime(clock.millis() + staleThreshold.toMillis() + 1); + } + + @Override + protected List getStaleBatches() { + return writeAheadLogManager.getTimeRanges(staleThreshold, batchSize).stream() + .map(writeAheadLogManager::getStaleBatchesForRange) + .flatMap(Collection::stream) + .map(record -> record.batchId) + .collect(Collectors.toList()); + } + +} diff --git a/aerospike-batch-updater/src/test/resources/log4j2.xml b/aerospike-batch-updater/src/test/resources/log4j2.xml new file mode 100644 index 0000000..edfd048 --- /dev/null +++ b/aerospike-batch-updater/src/test/resources/log4j2.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/aerospike-container/pom.xml b/aerospike-container/pom.xml new file mode 100644 index 0000000..d778b06 --- /dev/null +++ b/aerospike-container/pom.xml @@ -0,0 +1,30 @@ + + + + com.playtika.nosql + batch-updater-parent + 0.0.22 + + 4.0.0 + + aerospike-container + + + + com.aerospike + aerospike-client + + + org.testcontainers + testcontainers + + + + junit + junit + compile + + + \ No newline at end of file diff --git a/aerospike-container/src/main/java/com/aerospike/AerospikeContainerUtils.java 
b/aerospike-container/src/main/java/com/aerospike/AerospikeContainerUtils.java new file mode 100644 index 0000000..4351f10 --- /dev/null +++ b/aerospike-container/src/main/java/com/aerospike/AerospikeContainerUtils.java @@ -0,0 +1,64 @@ +package com.aerospike; + +import com.github.dockerjava.api.model.Capability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.wait.strategy.HostPortWaitStrategy; +import org.testcontainers.containers.wait.strategy.WaitAllStrategy; +import org.testcontainers.containers.wait.strategy.WaitStrategy; + +import java.time.Duration; + +public class AerospikeContainerUtils { + + private static final Logger log = LoggerFactory.getLogger(AerospikeContainerUtils.class); + + public static GenericContainer startAerospikeContainer(AerospikeProperties properties){ + AerospikeWaitStrategy aerospikeWaitStrategy = new AerospikeWaitStrategy(properties); + + log.info("Starting aerospike server enterprise. Docker image: {}", properties.dockerImage); + + Duration startupTimeout = Duration.ofSeconds(60); + WaitStrategy waitStrategy = new WaitAllStrategy() + .withStrategy(aerospikeWaitStrategy) + .withStrategy(new HostPortWaitStrategy()) + .withStartupTimeout(startupTimeout); + + GenericContainer aerospike = + new GenericContainer<>(properties.dockerImage) + .withExposedPorts(properties.port) + .withEnv("NAMESPACE", properties.namespace) + .withEnv("SERVICE_PORT", String.valueOf(properties.port)) + .withEnv("MEM_GB", String.valueOf(1)) + .withEnv("STORAGE_GB", String.valueOf(1)) + .withCreateContainerCmdModifier(cmd -> cmd.withCapAdd(Capability.NET_ADMIN)) + .waitingFor(waitStrategy) + .withStartupTimeout(startupTimeout); + + aerospike.start(); + configureEnterpriseServer(properties, aerospike); + return aerospike; + } + + private static void configureEnterpriseServer(AerospikeProperties properties, + GenericContainer aerospikeContainer) { + AsadmCommandExecutor asadmCommandExecutor = new AsadmCommandExecutor(aerospikeContainer); + String namespace = properties.getNamespace(); + /* + By default, the value of this metric is 90%, we set it to 100% to prevent stopping writes for the Aerospike + Enterprise container during high consumption of system memory. For the Aerospike Community Edition, this metric is not used. + Documentation: https://aerospike.com/docs/server/reference/configuration#stop-writes-sys-memory-pct + */ + log.info("Switching off 'stop-writes-sys-memory-pct'... 
"); + asadmCommandExecutor.execute(String.format("manage config namespace %s param stop-writes-sys-memory-pct to 100", namespace)); + log.info("Success switching off 'stop-writes-sys-memory-pct'"); + + if (properties.isDurableDelete()) { + log.info("Setting up 'disallow-expunge' to true..."); + asadmCommandExecutor.execute(String.format("manage config namespace %s param disallow-expunge to true", namespace)); + log.info("Success setting up 'disallow-expunge' to true"); + } + } + +} diff --git a/aerospike-container/src/main/java/com/aerospike/AerospikeProperties.java b/aerospike-container/src/main/java/com/aerospike/AerospikeProperties.java new file mode 100644 index 0000000..b7e2701 --- /dev/null +++ b/aerospike-container/src/main/java/com/aerospike/AerospikeProperties.java @@ -0,0 +1,82 @@ +/* +* The MIT License (MIT) +* +* Copyright (c) 2018 Playtika +* +* Permission is hereby granted, free of charge, to any person obtaining a copy +* of this software and associated documentation files (the "Software"), to deal +* in the Software without restriction, including without limitation the rights +* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +* copies of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in all +* copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+ */ +package com.aerospike; + +public class AerospikeProperties { + + boolean enabled = true; + String dockerImage = "aerospike/aerospike-server-enterprise:6.3.0.16_1"; + String namespace = "TEST"; + String host = "localhost"; + int port = 3000; + boolean durableDelete = true; + + public boolean isEnabled() { + return enabled; + } + + public void setEnabled(boolean enabled) { + this.enabled = enabled; + } + + public String getDockerImage() { + return dockerImage; + } + + public void setDockerImage(String dockerImage) { + this.dockerImage = dockerImage; + } + + public String getNamespace() { + return namespace; + } + + public void setNamespace(String namespace) { + this.namespace = namespace; + } + + public String getHost() { + return host; + } + + public void setHost(String host) { + this.host = host; + } + + public int getPort() { + return port; + } + + public void setPort(int port) { + this.port = port; + } + + public boolean isDurableDelete() { + return durableDelete; + } + + public void setDurableDelete(boolean durableDelete) { + this.durableDelete = durableDelete; + } +} diff --git a/aerospike-container/src/main/java/com/aerospike/AerospikeWaitStrategy.java b/aerospike-container/src/main/java/com/aerospike/AerospikeWaitStrategy.java new file mode 100644 index 0000000..7f427ed --- /dev/null +++ b/aerospike-container/src/main/java/com/aerospike/AerospikeWaitStrategy.java @@ -0,0 +1,75 @@ +package com.aerospike; + +import com.aerospike.client.AerospikeClient; +import com.aerospike.client.AerospikeException; +import com.github.dockerjava.api.command.InspectContainerResponse; +import com.github.dockerjava.api.model.ExposedPort; +import com.github.dockerjava.api.model.NetworkSettings; +import com.github.dockerjava.api.model.Ports; +import org.rnorth.ducttape.TimeoutException; +import org.rnorth.ducttape.unreliables.Unreliables; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.DockerClientFactory; +import org.testcontainers.containers.ContainerLaunchException; +import org.testcontainers.containers.wait.strategy.AbstractWaitStrategy; + +import java.util.Map; +import java.util.concurrent.TimeUnit; + +public class AerospikeWaitStrategy extends AbstractWaitStrategy { + + private final Logger log = LoggerFactory.getLogger(getClass()); + + private final AerospikeProperties properties; + + public AerospikeWaitStrategy(AerospikeProperties properties) { + this.properties = properties; + } + + @Override + protected void waitUntilReady() { + long seconds = this.startupTimeout.getSeconds(); + + try { + Unreliables.retryUntilTrue((int)seconds, TimeUnit.SECONDS, + () -> this.getRateLimiter().getWhenReady(this::isReady)); + } catch (TimeoutException var4) { + throw new ContainerLaunchException(String.format("[%s] notifies that container[%s] is not ready after [%d] seconds, container cannot be started.", this.getContainerType(), this.waitStrategyTarget.getContainerId(), seconds)); + } + } + + protected boolean isReady() { + String containerId = waitStrategyTarget.getContainerId(); + log.debug("Check Aerospike container {} status", containerId); + + InspectContainerResponse containerInfo = waitStrategyTarget.getContainerInfo(); + if (containerInfo == null) { + log.debug("Aerospike container[{}] doesn't contain info. 
Abnormal situation, should not happen.", containerId); + return false; + } + + int port = getMappedPort(containerInfo.getNetworkSettings(), properties.port); + String host = DockerClientFactory.instance().dockerHostIpAddress(); + + //TODO: Remove dependency to client https://www.aerospike.com/docs/tools/asmonitor/common_tasks.html + try (AerospikeClient client = new AerospikeClient(host, port)) { + return client.isConnected(); + } catch (AerospikeException.Connection e) { + log.debug("Aerospike container: {} not yet started. {}", containerId, e.getMessage()); + } + return false; + } + + private int getMappedPort(NetworkSettings networkSettings, int originalPort) { + ExposedPort exposedPort = new ExposedPort(originalPort); + Ports ports = networkSettings.getPorts(); + Map bindings = ports.getBindings(); + Ports.Binding[] binding = bindings.get(exposedPort); + return Integer.valueOf(binding[0].getHostPortSpec()); + } + + protected String getContainerType() { + return this.getClass().getSimpleName(); + } +} diff --git a/aerospike-container/src/main/java/com/aerospike/AsadmCommandExecutor.java b/aerospike-container/src/main/java/com/aerospike/AsadmCommandExecutor.java new file mode 100644 index 0000000..8b82fce --- /dev/null +++ b/aerospike-container/src/main/java/com/aerospike/AsadmCommandExecutor.java @@ -0,0 +1,48 @@ +package com.aerospike; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.Container; +import org.testcontainers.containers.GenericContainer; + +import java.io.IOException; + +public class AsadmCommandExecutor { + + private static final Logger log = LoggerFactory.getLogger(AsadmCommandExecutor.class); + + private final GenericContainer aerospikeContainer; + + public AsadmCommandExecutor(GenericContainer aerospikeContainer) { + this.aerospikeContainer = aerospikeContainer; + } + + public void execute(String command) { + try { + Container.ExecResult result = aerospikeContainer.execInContainer("asadm", "--enable", "-e", command); + logStdout(result); + if (result.getExitCode() != 0 || isBadResponse(result)) { + throw new IllegalStateException(String.format("Failed to execute \"asadm --enable -e '%s'\": \nstdout:\n%s\nstderr:\n%s", + command, result.getStdout(), result.getStderr())); + } + } catch (Exception ex) { + throw new IllegalStateException(String.format("Failed to execute \"asadm\"", ex)); + } + } + + private boolean isBadResponse(Container.ExecResult execResult) { + String stdout = execResult.getStdout(); + /* + Example of the stdout without error: + ~Set Namespace Param stop-writes-sys-memory-pct to 100~ + Node|Response + 728bb242e58c:3000|ok + Number of rows: 1 + */ + return !stdout.contains("|ok"); + } + + private static void logStdout(Container.ExecResult result) { + log.debug("Aerospike asadm util stdout: \n{}\n{}", result.getStdout(), result.getStderr()); + } +} diff --git a/aerospike-reactor-batch-updater/pom.xml b/aerospike-reactor-batch-updater/pom.xml new file mode 100644 index 0000000..7f8c8f2 --- /dev/null +++ b/aerospike-reactor-batch-updater/pom.xml @@ -0,0 +1,123 @@ + + + + 4.0.0 + + + com.playtika.nosql + batch-updater-parent + 0.0.22 + + + aerospike-reactor-batch-updater + jar + Reactor batch updates on Aerospike + + + + + com.aerospike + aerospike-client + provided + + + + com.aerospike + aerospike-reactor-client + provided + + + + com.playtika.nosql + aerospike-batch-updater + + + + com.playtika.nosql + reactor-batch-updater + + + + com.playtika.nosql + batch-updater + + + + org.slf4j + slf4j-api + + + + + 
com.playtika.nosql + batch-updater + test-jar + test + + + + com.playtika.nosql + reactor-batch-updater + test-jar + test + + + + com.playtika.nosql + aerospike-batch-updater + test-jar + test + + + + com.playtika.nosql + aerospike-container + test + + + + junit + junit + test + + + + org.assertj + assertj-core + test + + + + org.apache.logging.log4j + log4j-slf4j-impl + test + + + + org.awaitility + awaitility + test + + + + + io.projectreactor.tools + blockhound + test + + + \ No newline at end of file diff --git a/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicBatchUpdater.java b/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicBatchUpdater.java new file mode 100644 index 0000000..791d59f --- /dev/null +++ b/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicBatchUpdater.java @@ -0,0 +1,55 @@ +package nosql.batch.update.reactor.aerospike.basic; + +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.Value; +import com.aerospike.client.reactor.IAerospikeReactorClient; +import nosql.batch.update.aerospike.basic.AerospikeBasicBatchUpdateSerde; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.reactor.ReactorBatchOperations; +import nosql.batch.update.reactor.aerospike.lock.AerospikeReactorLockOperations; +import nosql.batch.update.reactor.aerospike.wal.AerospikeReactorWriteAheadLogManager; + +import java.time.Clock; +import java.util.List; + +public class AerospikeBasicBatchUpdater { + + public static ReactorBatchOperations, AerospikeLock, Value> basicOperations( + IAerospikeClient client, IAerospikeReactorClient reactorClient, + String walNamespace, + String walSetName, + Clock clock){ + + AerospikeReactorWriteAheadLogManager, List> walManager = + basicWalManager(client, reactorClient, walNamespace, walSetName, clock); + + AerospikeReactorLockOperations> lockOperations = + basicLockOperations(reactorClient); + + AerospikeBasicReactorUpdateOperations updateOperations = basicUpdateOperations(reactorClient); + + return new ReactorBatchOperations<>(walManager, lockOperations, updateOperations); + } + + public static AerospikeBasicReactorUpdateOperations basicUpdateOperations(IAerospikeReactorClient client) { + return new AerospikeBasicReactorUpdateOperations(client); + } + + public static AerospikeReactorLockOperations> basicLockOperations( + IAerospikeReactorClient reactorClient) { + return new AerospikeReactorLockOperations<>( + reactorClient, + new AerospikeBasicReactorExpectedValueOperations(reactorClient)); + } + + public static AerospikeReactorWriteAheadLogManager, List> basicWalManager( + IAerospikeClient client, IAerospikeReactorClient reactorClient, String walNamespace, String walSetName, Clock clock) { + return new AerospikeReactorWriteAheadLogManager<>( + client, reactorClient, walNamespace, walSetName, + new AerospikeBasicBatchUpdateSerde(), + clock); + } + +} diff --git a/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicReactorExpectedValueOperations.java b/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicReactorExpectedValueOperations.java new file mode 100644 index 0000000..fa862da --- /dev/null +++ 
b/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicReactorExpectedValueOperations.java @@ -0,0 +1,71 @@ +package nosql.batch.update.reactor.aerospike.basic; + +import com.aerospike.client.BatchRead; +import com.aerospike.client.Bin; +import com.aerospike.client.Value; +import com.aerospike.client.reactor.IAerospikeReactorClient; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.lock.Lock; +import nosql.batch.update.lock.PermanentLockingException; +import nosql.batch.update.reactor.aerospike.lock.AerospikeReactorExpectedValuesOperations; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.List; + + +public class AerospikeBasicReactorExpectedValueOperations implements AerospikeReactorExpectedValuesOperations> { + + private final IAerospikeReactorClient client; + + public AerospikeBasicReactorExpectedValueOperations(IAerospikeReactorClient client) { + this.client = client; + } + + @Override + public Mono checkExpectedValues(List locks, List expectedValues) throws PermanentLockingException { + + if(locks.size() != expectedValues.size()){ + throw new IllegalArgumentException("locks.size() != expectedValues.size()"); + } + + List batchReads = new ArrayList<>(expectedValues.size()); + List expectedValuesToCheck = new ArrayList<>(expectedValues.size()); + for(int i = 0, n = expectedValues.size(); i < n; i++){ + if(locks.get(i).lockType == Lock.LockType.SAME_BATCH){ + continue; + } + Record record = expectedValues.get(i); + batchReads.add(new BatchRead(record.key, record.bins.stream() + .map(bin -> bin.name) + .toArray(String[]::new))); + expectedValuesToCheck.add(record); + } + + return client.get(null, batchReads) + .doOnNext(batchReadResult -> { + for(int i = 0, n = expectedValuesToCheck.size(); i < n; i++){ + checkValues(batchReads.get(i), expectedValuesToCheck.get(i)); + } + }) + .then(); + + + } + + private void checkValues(BatchRead batchRead, Record expectedValues) throws PermanentLockingException { + for(Bin bin : expectedValues.bins){ + Object actualValue = batchRead.record != null ? 
batchRead.record.getValue(bin.name) : null; + if(!equals(actualValue, bin.value)){ + throw new PermanentLockingException(String.format( + "Unexpected value: bin=[%s], expected=[%s], actual=[%s]", + bin.name, bin.value, actualValue)); + } + } + } + + private boolean equals(Object actualValue, Value expectedValue) { + return expectedValue.equals(Value.get(actualValue)); + } +} diff --git a/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicReactorUpdateOperations.java b/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicReactorUpdateOperations.java new file mode 100644 index 0000000..b81013d --- /dev/null +++ b/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicReactorUpdateOperations.java @@ -0,0 +1,34 @@ +package nosql.batch.update.reactor.aerospike.basic; + +import com.aerospike.client.Bin; +import com.aerospike.client.Key; +import com.aerospike.client.policy.WritePolicy; +import com.aerospike.client.reactor.IAerospikeReactorClient; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.reactor.ReactorUpdateOperations; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.List; + +public class AerospikeBasicReactorUpdateOperations implements ReactorUpdateOperations> { + + private final IAerospikeReactorClient client; + private final WritePolicy writePolicy; + + public AerospikeBasicReactorUpdateOperations(IAerospikeReactorClient client) { + this.client = client; + this.writePolicy = client.getWritePolicyDefault(); + } + + @Override + public Mono updateMany(List batchOfUpdates, boolean calledByWal) { + return Flux.fromIterable(batchOfUpdates) + .flatMap(this::update) + .then(); + } + + private Mono update(Record record){ + return client.put(writePolicy, record.key, record.bins.toArray(new Bin[0])); + } +} diff --git a/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/basic/wal/AerospikeBasicWalCompleter.java b/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/basic/wal/AerospikeBasicWalCompleter.java new file mode 100644 index 0000000..9f9f17b --- /dev/null +++ b/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/basic/wal/AerospikeBasicWalCompleter.java @@ -0,0 +1,37 @@ +package nosql.batch.update.reactor.aerospike.basic.wal; + +import com.aerospike.client.Value; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.aerospike.wal.AerospikeExclusiveLocker; +import nosql.batch.update.reactor.ReactorBatchOperations; +import nosql.batch.update.reactor.aerospike.wal.AerospikeReactorWriteAheadLogManager; +import nosql.batch.update.reactor.wal.ReactorWriteAheadLogCompleter; +import nosql.batch.update.reactor.wal.ReactorWriteAheadLogManager; + +import java.time.Duration; +import java.util.List; +import java.util.concurrent.Executors; + +public class AerospikeBasicWalCompleter { + + public static ReactorWriteAheadLogCompleter, AerospikeLock, Value> basicCompleter( + ReactorBatchOperations, AerospikeLock, Value> batchOperations, + Duration staleBatchesThreshold, int batchSize){ + ReactorWriteAheadLogManager, Value> writeAheadLogManager + = batchOperations.getWriteAheadLogManager(); + AerospikeReactorWriteAheadLogManager aerospikeReactorWriteAheadLogManager = 
(AerospikeReactorWriteAheadLogManager)writeAheadLogManager; + + return new ReactorWriteAheadLogCompleter<>( + batchOperations, + staleBatchesThreshold, + batchSize, + new AerospikeExclusiveLocker( + aerospikeReactorWriteAheadLogManager.getClient(), + aerospikeReactorWriteAheadLogManager.getWalNamespace(), + aerospikeReactorWriteAheadLogManager.getWalSetName()), + Executors.newScheduledThreadPool(1) + ); + } + +} diff --git a/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/lock/AerospikeReactorExpectedValuesOperations.java b/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/lock/AerospikeReactorExpectedValuesOperations.java new file mode 100644 index 0000000..41c6c60 --- /dev/null +++ b/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/lock/AerospikeReactorExpectedValuesOperations.java @@ -0,0 +1,13 @@ +package nosql.batch.update.reactor.aerospike.lock; + +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.lock.PermanentLockingException; +import reactor.core.publisher.Mono; + +import java.util.List; + +public interface AerospikeReactorExpectedValuesOperations{ + + Mono checkExpectedValues(List locks, EV expectedValues) throws PermanentLockingException; + +} diff --git a/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/lock/AerospikeReactorLockOperations.java b/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/lock/AerospikeReactorLockOperations.java new file mode 100644 index 0000000..5170e26 --- /dev/null +++ b/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/lock/AerospikeReactorLockOperations.java @@ -0,0 +1,205 @@ +package nosql.batch.update.reactor.aerospike.lock; + +import com.aerospike.client.AerospikeException; +import com.aerospike.client.Bin; +import com.aerospike.client.Key; +import com.aerospike.client.Record; +import com.aerospike.client.ResultCode; +import com.aerospike.client.Value; +import com.aerospike.client.policy.RecordExistsAction; +import com.aerospike.client.policy.WritePolicy; +import com.aerospike.client.reactor.IAerospikeReactorClient; +import nosql.batch.update.aerospike.lock.AerospikeBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.lock.LockingException; +import nosql.batch.update.lock.TemporaryLockingException; +import nosql.batch.update.reactor.lock.ReactorLockOperations; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import static nosql.batch.update.lock.Lock.LockType.LOCKED; +import static nosql.batch.update.lock.Lock.LockType.SAME_BATCH; + +public class AerospikeReactorLockOperations, EV> + implements ReactorLockOperations { + + private static final Logger logger = LoggerFactory.getLogger(AerospikeReactorLockOperations.class); + + private static final String BATCH_ID_BIN_NAME = "batch_id"; + + private final IAerospikeReactorClient reactorClient; + private final WritePolicy putLockPolicy; + private final WritePolicy deleteLockPolicy; + private final AerospikeReactorExpectedValuesOperations expectedValuesOperations; + + public AerospikeReactorLockOperations(IAerospikeReactorClient reactorClient, + AerospikeReactorExpectedValuesOperations expectedValuesOperations) { + this.putLockPolicy = 
configurePutLockPolicy(reactorClient.getWritePolicyDefault()); + this.reactorClient = reactorClient; + this.deleteLockPolicy = putLockPolicy; + this.expectedValuesOperations = expectedValuesOperations; + } + + private WritePolicy configurePutLockPolicy(WritePolicy writePolicyDefault){ + WritePolicy writePolicy = new WritePolicy(writePolicyDefault); + writePolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; + writePolicy.expiration = -1; + return writePolicy; + } + + @Override + public Mono> acquire(Value batchId, LOCKS batchLocks, boolean checkBatchId) throws LockingException { + return putLocks(batchId, batchLocks, checkBatchId) + .flatMap(keysLocked -> checkExpectedValues(batchLocks, keysLocked) + .then(Mono.just(keysLocked))); + } + + protected Mono> putLocks( + Value batchId, + LOCKS batchLocks, + boolean checkTransactionId) { + + return Flux.fromIterable(batchLocks.keysToLock()) + .flatMap(lockKey -> putLock(batchId, lockKey, checkTransactionId) + .map(LockResult::new) + .onErrorResume(throwable -> Mono.just(new LockResult<>(throwable)))) + .collectList() + .flatMap(lockResults -> processResults(lockResults)); + } + + static Mono> processResults(List> lockResults) { + List locks = new ArrayList<>(lockResults.size()); + Throwable resultError = null; + for(LockResult lockResult : lockResults){ + if(lockResult.throwable != null){ + if(lockResult.throwable instanceof LockingException){ + if(resultError == null) { + resultError = lockResult.throwable; + } + } else { + //give priority to non LockingException + resultError = new RuntimeException(lockResult.throwable); + break; + } + } + locks.add(lockResult.value); + } + if(resultError != null){ + return Mono.error(resultError); + } + return Mono.just(locks); + } + + private Mono putLock(Value batchId, Key lockKey, boolean checkBatchId) { + return reactorClient.add(putLockPolicy, lockKey, new Bin(BATCH_ID_BIN_NAME, batchId)) + .map(key -> { + logger.trace("acquired lock key=[{}], batchId=[{}]", lockKey, batchId); + return new AerospikeLock(LOCKED, lockKey); + }) + .onErrorResume(AerospikeException.class, ae -> { + if (ae.getResultCode() == ResultCode.KEY_EXISTS_ERROR) { + if (checkBatchId) { + return alreadyLockedByBatch(lockKey, batchId) + .flatMap(actualBatchId -> { + if(batchId.equals(actualBatchId)){ + //check for same batch + //this is used only by WriteAheadLogCompleter to skip already locked keys + logger.info("Previously locked by this batch update key=[{}], batchId=[{}]", + lockKey, batchId); + return Mono.just(new AerospikeLock(SAME_BATCH, lockKey)); + } else { + logger.error("Locked by other batch update but not expected key=[{}], batchId=[{}], actualBatchId=[{}]", + lockKey, batchId, actualBatchId); + return Mono.error(new TemporaryLockingException(String.format( + "Locked by this batch update but not expected key=[%s], batchId=[%s], actualBatchId=[%s]", + lockKey, batchId, actualBatchId))); + } + }); + } else { + return getBatchIdOfLock(lockKey) + .flatMap(batchIdLocked -> { + logger.info("Locked by concurrent update key=[{}], batchId=[{}], batchIdLocked=[{}]", + lockKey, batchId, batchIdLocked); + return Mono.error(new TemporaryLockingException(String.format( + "Locked by concurrent update key=[%s], batchId=[%s], batchIdLocked=[%s]", + lockKey, batchId, batchIdLocked))); + }); + } + } else { + logger.error("Unexpected error while acquiring lock key=[{}], batchId=[{}]", lockKey, batchId); + return Mono.error(ae); + } + }); + } + + protected Mono checkExpectedValues(LOCKS batchLocks, List keysLocked) { + return 
expectedValuesOperations.checkExpectedValues(keysLocked, batchLocks.expectedValues()); + } + + private Mono getBatchIdOfLock(Key lockKey){ + return reactorClient.get(null, lockKey) + .map(keyRecord -> getBatchId(keyRecord.record)); + } + + private Value getBatchId(Record record) { + return record != null + ? Value.get(record.getValue(BATCH_ID_BIN_NAME)) : + //may have place if key get unlocked before we get response + Value.getAsNull(); + } + + private Mono alreadyLockedByBatch(Key lockKey, Value batchId) { + return getBatchIdOfLock(lockKey); + } + + @Override + public Mono> getLockedByBatchUpdate(LOCKS aerospikeBatchLocks, Value batchId) { + List keys = aerospikeBatchLocks.keysToLock(); + + Key[] keysArray = keys.toArray(new Key[0]); + return reactorClient.get(null, keysArray) + .map(keyRecords -> { + List keysFiltered = new ArrayList<>(keys.size()); + for(int i = 0, m = keysArray.length; i < m; i++){ + Record record = keyRecords.records[i]; + if(record != null && batchId.equals(getBatchId(record))){ + keysFiltered.add(new AerospikeLock(SAME_BATCH, keysArray[i])); + } + } + return keysFiltered; + }); + } + + @Override + public Mono release(Collection locks, Value batchId) { + + return Flux.fromIterable(locks) + .flatMap(lock -> reactorClient.delete(deleteLockPolicy, lock.key) + .doOnNext(key -> logger.trace("released lock key=[{}], batchId=[{}]", key, batchId)) + ) + .then(); + } + + public static class LockResult { + public final V value; + public final Throwable throwable; + + public LockResult(V value) { + this.value = value; + this.throwable = null; + } + + public LockResult(Throwable throwable) { + this.value = null; + this.throwable = throwable; + } + } + + +} diff --git a/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/wal/AerospikeReactorWriteAheadLogManager.java b/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/wal/AerospikeReactorWriteAheadLogManager.java new file mode 100644 index 0000000..8f551d6 --- /dev/null +++ b/aerospike-reactor-batch-updater/src/main/java/nosql/batch/update/reactor/aerospike/wal/AerospikeReactorWriteAheadLogManager.java @@ -0,0 +1,173 @@ +package nosql.batch.update.reactor.aerospike.wal; + +import com.aerospike.client.AerospikeException; +import com.aerospike.client.Bin; +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.Key; +import com.aerospike.client.Record; +import com.aerospike.client.ResultCode; +import com.aerospike.client.Value; +import com.aerospike.client.policy.RecordExistsAction; +import com.aerospike.client.policy.WritePolicy; +import com.aerospike.client.query.IndexType; +import com.aerospike.client.query.RecordSet; +import com.aerospike.client.query.Statement; +import com.aerospike.client.reactor.IAerospikeReactorClient; +import nosql.batch.update.BatchUpdate; +import nosql.batch.update.aerospike.lock.AerospikeBatchLocks; +import nosql.batch.update.aerospike.wal.AerospikeBatchUpdateSerde; +import nosql.batch.update.reactor.wal.ReactorWriteAheadLogManager; +import nosql.batch.update.wal.WalRecord; +import nosql.batch.update.wal.WalTimeRange; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.nio.ByteBuffer; +import java.time.Clock; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; + +import static nosql.batch.update.aerospike.wal.AerospikeWriteAheadLogManager.generateBatchId; +import static 
nosql.batch.update.aerospike.wal.AerospikeWriteAheadLogManager.getTimeRangesForTimestamps; +import static nosql.batch.update.aerospike.wal.AerospikeWriteAheadLogManager.staleBatchesStatement; + +public class AerospikeReactorWriteAheadLogManager, UPDATES, EV> + implements ReactorWriteAheadLogManager { + + private static final Logger logger = LoggerFactory.getLogger(AerospikeReactorWriteAheadLogManager.class); + + private static final String UUID_BIN_NAME = "uuid"; + private static final String TIMESTAMP_BIN_NAME = "timestamp"; + + private final IAerospikeClient client; + private final IAerospikeReactorClient reactorClient; + private final String walNamespace; + private final String walSetName; + private final WritePolicy writePolicy; + private final WritePolicy deletePolicy; + private final AerospikeBatchUpdateSerde batchSerializer; + private final Clock clock; + + public AerospikeReactorWriteAheadLogManager(IAerospikeClient client, + IAerospikeReactorClient reactorClient, + String walNamespace, String walSetName, + AerospikeBatchUpdateSerde batchSerializer, + Clock clock) { + this.client = client; + this.reactorClient = reactorClient; + this.walNamespace = walNamespace; + this.walSetName = walSetName; + this.writePolicy = configureWritePolicy(client.getWritePolicyDefault()); + this.deletePolicy = this.writePolicy; + this.batchSerializer = batchSerializer; + this.clock = clock; + + createSecondaryIndexOnTimestamp(); + } + + private WritePolicy configureWritePolicy(WritePolicy writePolicyDefault){ + WritePolicy writePolicy = new WritePolicy(writePolicyDefault); + writePolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; + writePolicy.sendKey = true; + writePolicy.expiration = -1; + return writePolicy; + } + + @Override + public Mono writeBatch(BatchUpdate batch) { + Value batchId = generateBatchId(); + + List batchBins = batchSerializer.write(batch); + List bins = new ArrayList<>(batchBins.size() + 1); + bins.addAll(batchBins); + bins.add(new Bin(UUID_BIN_NAME, batchId)); + bins.add(new Bin(TIMESTAMP_BIN_NAME, Value.get(clock.millis()))); + + return reactorClient.put(writePolicy, + new Key(walNamespace, walSetName, batchId), + bins.toArray(new Bin[0])) + .doOnNext(key -> logger.trace("added batch to wal: {}", batchId)) + .onErrorMap(AerospikeException.class, ae -> { + if(ae.getResultCode() == ResultCode.RECORD_TOO_BIG){ + logger.error("update data size to big: {}", batchBins.stream().mapToInt(bin -> bin.value.estimateSize()).sum()); + } + return ae; + }) + .then(Mono.just(batchId)); + } + + @Override + public Mono deleteBatch(Value batchId) { + return reactorClient.delete(deletePolicy, new Key(walNamespace, walSetName, batchId)) + .map(key -> true) + .defaultIfEmpty(false); + } + + @Override + public List getTimeRanges(Duration staleThreshold, int batchSize) { + Statement statement = staleBatchesStatement(staleThreshold, walNamespace, walSetName, clock); + RecordSet recordSet = client.query(null, statement); + + List timestamps = new ArrayList<>(); + recordSet.iterator().forEachRemaining(keyRecord -> timestamps.add(keyRecord.record.getLong(TIMESTAMP_BIN_NAME))); + Collections.sort(timestamps); + + return getTimeRangesForTimestamps(timestamps, batchSize); + } + + @Override + public List> getStaleBatchesForRange(WalTimeRange timeRange) { + Statement statement = staleBatchesStatement(walNamespace, walSetName, timeRange.getFromTimestamp(), timeRange.getToTimestamp()); + RecordSet recordSet = client.query(null, statement); + + List> staleTransactions = new ArrayList<>(); + 
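+        // Each stale WAL record found by the query is deserialized back into its batch
+        // payload via batchSerializer.read(record.bins) below; the uuid and timestamp bins
+        // written in writeBatch() identify the batch. The list is then sorted (WalRecord
+        // ordering, presumably by timestamp) before being handed to the completer.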
recordSet.iterator().forEachRemaining(keyRecord -> { + Record record = keyRecord.record; + staleTransactions.add(new WalRecord<>( + Value.get(record.getValue(UUID_BIN_NAME)), + record.getLong(TIMESTAMP_BIN_NAME), + batchSerializer.read(record.bins))); + }); + Collections.sort(staleTransactions); + + return staleTransactions; + } + + static byte[] getBytesFromUUID(UUID uuid) { + ByteBuffer bb = ByteBuffer.wrap(new byte[16]); + bb.putLong(uuid.getMostSignificantBits()); + bb.putLong(uuid.getLeastSignificantBits()); + + return bb.array(); + } + + private void createSecondaryIndexOnTimestamp() { + try { + String indexName = walSetName + "_timestamp"; + client.createIndex(null, walNamespace, walSetName, indexName, TIMESTAMP_BIN_NAME, IndexType.NUMERIC) + .waitTillComplete(200, 0); + } catch (AerospikeException ae) { + if(ae.getResultCode() == ResultCode.INDEX_ALREADY_EXISTS){ + logger.info("Will not create WAL secondary index as it already exists"); + } else { + throw ae; + } + } + } + + public String getWalNamespace() { + return walNamespace; + } + + public String getWalSetName() { + return walSetName; + } + + public IAerospikeClient getClient() { + return client; + } +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/AerospikeTestUtils.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/AerospikeTestUtils.java new file mode 100644 index 0000000..b2f23af --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/AerospikeTestUtils.java @@ -0,0 +1,30 @@ +package nosql.batch.update.reactor.aerospike; + +import com.aerospike.AerospikeContainerUtils; +import com.aerospike.AerospikeProperties; +import com.aerospike.client.AerospikeClient; +import com.aerospike.client.async.EventLoops; +import com.aerospike.client.policy.ClientPolicy; +import org.testcontainers.containers.GenericContainer; + +public class AerospikeTestUtils { + + public static AerospikeProperties AEROSPIKE_PROPERTIES = new AerospikeProperties(); + + public static GenericContainer getAerospikeContainer() { + return AerospikeContainerUtils.startAerospikeContainer(AEROSPIKE_PROPERTIES); + } + + public static AerospikeClient getAerospikeClient(GenericContainer aerospike) { + return getAerospikeClient(aerospike, null); + } + + public static AerospikeClient getAerospikeClient(GenericContainer aerospike, EventLoops eventLoops) { + ClientPolicy clientPolicy = new ClientPolicy(); + clientPolicy.eventLoops = eventLoops; + clientPolicy.writePolicyDefault.durableDelete = true; + return new AerospikeClient(clientPolicy, aerospike.getContainerIpAddress(), + aerospike.getMappedPort(AEROSPIKE_PROPERTIES.getPort())); + } + +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicFailingUpdateOperations.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicFailingUpdateOperations.java new file mode 100644 index 0000000..15ce692 --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicFailingUpdateOperations.java @@ -0,0 +1,32 @@ +package nosql.batch.update.reactor.aerospike.basic; + +import nosql.batch.update.ReactorFailingUpdateOperations; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.reactor.ReactorUpdateOperations; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import 
java.util.concurrent.atomic.AtomicBoolean; + +import static nosql.batch.update.util.HangingUtil.selectFlaking; + +public class AerospikeBasicFailingUpdateOperations extends ReactorFailingUpdateOperations> { + + private static final Logger logger = LoggerFactory.getLogger(AerospikeBasicFailingUpdateOperations.class); + + public AerospikeBasicFailingUpdateOperations(ReactorUpdateOperations> updateOperations, AtomicBoolean failsUpdate) { + super(updateOperations, failsUpdate); + } + + public static AerospikeBasicFailingUpdateOperations failingUpdates( + ReactorUpdateOperations> updateOperations, AtomicBoolean failsUpdate){ + return new AerospikeBasicFailingUpdateOperations(updateOperations, failsUpdate); + } + + @Override + protected List selectFlakingToUpdate(List records) { + return selectFlaking(records, + key -> logger.info("batch update failed flaking for key [{}]", key)); + } +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicHangingUpdateOperations.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicHangingUpdateOperations.java new file mode 100644 index 0000000..cf313b4 --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/AerospikeBasicHangingUpdateOperations.java @@ -0,0 +1,32 @@ +package nosql.batch.update.reactor.aerospike.basic; + +import nosql.batch.update.ReactorHangingUpdateOperations; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.reactor.ReactorUpdateOperations; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static nosql.batch.update.util.HangingUtil.selectFlaking; + +public class AerospikeBasicHangingUpdateOperations extends ReactorHangingUpdateOperations> { + + private static final Logger logger = LoggerFactory.getLogger(AerospikeBasicHangingUpdateOperations.class); + + public AerospikeBasicHangingUpdateOperations(ReactorUpdateOperations> updateOperations, AtomicBoolean failsUpdate) { + super(updateOperations, failsUpdate); + } + + public static AerospikeBasicHangingUpdateOperations hangingUpdates( + ReactorUpdateOperations> updateOperations, AtomicBoolean failsUpdate){ + return new AerospikeBasicHangingUpdateOperations(updateOperations, failsUpdate); + } + + @Override + protected List selectFlakingToUpdate(List records) { + return selectFlaking(records, + key -> logger.info("batch update failed flaking for key [{}]", key)); + } +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/BaseReactorTest.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/BaseReactorTest.java new file mode 100644 index 0000000..04157d0 --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/BaseReactorTest.java @@ -0,0 +1,86 @@ +package nosql.batch.update.reactor.aerospike.basic; + +import org.junit.BeforeClass; +import org.junit.Test; +import reactor.blockhound.BlockHound; +import reactor.blockhound.integration.BlockHoundIntegration; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Scheduler; +import reactor.core.scheduler.Schedulers; + +import java.time.Duration; +import java.util.ServiceLoader; + +abstract public class BaseReactorTest { + + public static final boolean INSTALL_BLOCKHOUND = true; + + @BeforeClass + public 
static void installBlockHound() { + if (INSTALL_BLOCKHOUND) { + BlockHound.Builder builder = BlockHound.builder(); + ServiceLoader serviceLoader = ServiceLoader.load(BlockHoundIntegration.class); + serviceLoader.forEach(builder::with); + + //spring + //check fails on server side as MimeTypeUtils$ConcurrentLruCache use this.lock.readLock().lock(); + builder.allowBlockingCallsInside("org.springframework.util.MimeTypeUtils", "parseMimeType"); + //java.io.RandomAccessFile.readBytes + builder.allowBlockingCallsInside("org.springframework.http.MediaTypeFactory", "parseMimeTypes"); + + //reactor //missed in ReactorBlockHoundIntegration + builder.allowBlockingCallsInside("java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue", "peek"); + builder.allowBlockingCallsInside("java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue", "remove"); + + //netty + builder.allowBlockingCallsInside("io.netty.util.concurrent.GlobalEventExecutor", "addTask"); + builder.allowBlockingCallsInside("io.netty.util.concurrent.GlobalEventExecutor", "takeTask"); + + //jetty + builder.allowBlockingCallsInside("org.eclipse.jetty.client.AbstractConnectionPool", "acquire"); + builder.allowBlockingCallsInside("org.eclipse.jetty.client.MultiplexConnectionPool", "acquire"); + builder.allowBlockingCallsInside("org.eclipse.jetty.client.MultiplexConnectionPool", "lock"); + builder.allowBlockingCallsInside("org.eclipse.jetty.util.BlockingArrayQueue", "poll"); + builder.allowBlockingCallsInside("org.eclipse.jetty.util.BlockingArrayQueue", "offer"); + builder.allowBlockingCallsInside("org.eclipse.jetty.util.BlockingArrayQueue", "peek"); + //java.net.InMemoryCookieStore.get + builder.allowBlockingCallsInside("org.eclipse.jetty.client.HttpConnection", "normalizeRequest"); + builder.allowBlockingCallsInside("java.util.concurrent.FutureTask", "handlePossibleCancellationInterrupt"); + + //jetty http2 server + builder.allowBlockingCallsInside("org.eclipse.jetty.util.IteratingCallback", "processing"); + builder.allowBlockingCallsInside("org.eclipse.jetty.util.IteratingCallback", "iterate"); + + //java11 + builder.allowBlockingCallsInside("jdk.internal.net.http.MultiExchange", "responseAsync"); + + builder.allowBlockingCallsInside("com.sun.jmx.mbeanserver.Repository", "remove"); + builder.allowBlockingCallsInside("com.sun.jmx.mbeanserver.Repository", "contains"); + builder.allowBlockingCallsInside("com.sun.jmx.mbeanserver.Repository", "retrieve"); + builder.allowBlockingCallsInside("com.sun.jmx.mbeanserver.Repository", "addMBean"); + + + builder.install(); + } + } + + //by default we want to detect blocking calls + protected Scheduler testScheduler() { + return Schedulers.parallel(); + } + + @Test(expected = RuntimeException.class) + public void shouldFailAsBlocking() { + Mono.delay(Duration.ofSeconds(1)) + .doOnNext(it -> { + try { + Thread.sleep(10); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }) + .block(); + } + + +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/BasicBatchRetentionTest.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/BasicBatchRetentionTest.java new file mode 100644 index 0000000..20a3969 --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/BasicBatchRetentionTest.java @@ -0,0 +1,95 @@ +package nosql.batch.update.reactor.aerospike.basic; + +import com.aerospike.client.AerospikeClient; +import 
com.aerospike.client.Key; +import com.aerospike.client.Value; +import com.aerospike.client.async.NioEventLoops; +import com.aerospike.client.reactor.AerospikeReactorClient; +import com.aerospike.client.reactor.IAerospikeReactorClient; +import nosql.batch.update.BatchRetentionTest; +import nosql.batch.update.aerospike.AerospikeTestUtils; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.reactor.ReactorBatchOperations; +import nosql.batch.update.reactor.ReactorBatchUpdater; +import nosql.batch.update.reactor.wal.ReactorWriteAheadLogCompleter; +import nosql.batch.update.util.FixedClock; +import nosql.batch.update.wal.CompletionStatistic; +import org.testcontainers.containers.GenericContainer; + +import java.time.Duration; +import java.util.List; +import java.util.concurrent.Executors; + +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.getAerospikeClient; +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.getAerospikeContainer; +import static nosql.batch.update.reactor.aerospike.basic.BasicConsistencyTest.getValue; +import static nosql.batch.update.reactor.aerospike.basic.BasicConsistencyTest.incrementBoth; +import static nosql.batch.update.reactor.aerospike.basic.util.BasicFailingOperationsUtil.failingOperations; +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; +import static org.awaitility.Duration.ONE_SECOND; + +public class BasicBatchRetentionTest extends BatchRetentionTest { + + static final GenericContainer aerospike = getAerospikeContainer(); + + static final NioEventLoops eventLoops = new NioEventLoops(); + static final AerospikeClient client = getAerospikeClient(aerospike, eventLoops); + static final IAerospikeReactorClient reactorClient = new AerospikeReactorClient(client, eventLoops); + + static final FixedClock clock = new FixedClock(); + + static ReactorBatchOperations, AerospikeLock, Value> operations + = failingOperations(client, reactorClient, clock, + failsAcquireLock, failsCheckValue, failsMutate, failsReleaseLock, failsDeleteBatch, deletesInProcess); + + static ReactorBatchUpdater, AerospikeLock, Value> updater + = new ReactorBatchUpdater<>(operations); + + public static final Duration STALE_BATCHES_THRESHOLD = Duration.ofSeconds(1); + public static final int BATCH_SIZE = 100; + + static ReactorWriteAheadLogCompleter, AerospikeLock, Value> walCompleter + = new ReactorWriteAheadLogCompleter<>( + operations, STALE_BATCHES_THRESHOLD, BATCH_SIZE, + new BasicRecoveryTest.DummyExclusiveLocker(), + Executors.newScheduledThreadPool(1)); + + private Key key1; + private Key key2; + + @Override + protected void runUpdate() { + for(int i = 0; i < 10; i++){ + incrementBoth(key1, key2, updater, client); + } + } + + @Override + protected void checkForConsistency() { + assertThat(getValue(key1, client)).isEqualTo(getValue(key2, client)); + + await().timeout(ONE_SECOND).untilAsserted(() -> + assertThat(operations.getWriteAheadLogManager().getTimeRanges(STALE_BATCHES_THRESHOLD, BATCH_SIZE)).isEmpty()); + } + + private int setNameCounter = 0; + @Override + protected void cleanUp() { + String setName = String.valueOf(setNameCounter++); + key1 = new Key(AerospikeTestUtils.AEROSPIKE_PROPERTIES.getNamespace(), setName, 0); + key2 = new Key(AerospikeTestUtils.AEROSPIKE_PROPERTIES.getNamespace(), setName, 1); + + clock.setTime(0); + } + + 
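+    /**
+     * Runs a single completion pass: the fixed clock is moved just past
+     * STALE_BATCHES_THRESHOLD so every batch written by runUpdate() is treated as
+     * stale and picked up by walCompleter.completeHangedTransactions().
+     */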
@Override + protected CompletionStatistic runCompleter() { + clock.setTime(STALE_BATCHES_THRESHOLD.toMillis() + 1); + return walCompleter.completeHangedTransactions(); + } + + +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/BasicConsistencyTest.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/BasicConsistencyTest.java new file mode 100644 index 0000000..8b0ba5a --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/BasicConsistencyTest.java @@ -0,0 +1,132 @@ +package nosql.batch.update.reactor.aerospike.basic; + +import com.aerospike.client.AerospikeClient; +import com.aerospike.client.Bin; +import com.aerospike.client.Key; +import com.aerospike.client.Value; +import com.aerospike.client.async.NioEventLoops; +import com.aerospike.client.reactor.AerospikeReactorClient; +import com.aerospike.client.reactor.IAerospikeReactorClient; +import nosql.batch.update.aerospike.basic.AerospikeBasicBatchUpdate; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.lock.LockingException; +import nosql.batch.update.reactor.ReactorBatchOperations; +import nosql.batch.update.reactor.ReactorBatchUpdater; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.GenericContainer; +import reactor.core.scheduler.Schedulers; + +import java.time.Clock; +import java.util.List; +import java.util.Random; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicInteger; + +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES; +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.getAerospikeClient; +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.getAerospikeContainer; +import static nosql.batch.update.reactor.aerospike.basic.AerospikeBasicBatchUpdater.basicOperations; +import static org.assertj.core.api.Assertions.assertThat; + +public class BasicConsistencyTest extends BaseReactorTest{ + + private static final Logger logger = LoggerFactory.getLogger(BasicConsistencyTest.class); + + private static final GenericContainer aerospike = getAerospikeContainer(); + + private static final NioEventLoops eventLoops = new NioEventLoops(); + private static final AerospikeClient client = getAerospikeClient(aerospike, eventLoops); + private static final IAerospikeReactorClient reactorClient = new AerospikeReactorClient(client, eventLoops); + + private static ReactorBatchOperations, AerospikeLock, Value> operations = basicOperations( + client, reactorClient, + AEROSPIKE_PROPERTIES.getNamespace(), "wal", + Clock.systemUTC()); + + private static ReactorBatchUpdater, AerospikeLock, Value> updater = new ReactorBatchUpdater<>(operations); + + private static String setName = String.valueOf(BasicConsistencyTest.class.hashCode()); + private static AtomicInteger keyCounter = new AtomicInteger(); + private static String BIN_NAME = "value"; + + private AtomicInteger exceptionsCount = new AtomicInteger(); + private Random random = new Random(); + private Key key1 = new 
Key(AEROSPIKE_PROPERTIES.getNamespace(), setName, keyCounter.incrementAndGet()); + private Key key2 = new Key(AEROSPIKE_PROPERTIES.getNamespace(), setName, keyCounter.incrementAndGet()); + + @Test + public void shouldUpdate() { + update(key1, key2); + + assertThat((Long)client.get(null, key1).getValue(BIN_NAME)).isEqualTo(1000); + assertThat((Long)client.get(null, key2).getValue(BIN_NAME)).isEqualTo(1000); + } + + @Test + public void shouldUpdateConcurrently() throws ExecutionException, InterruptedException { + Future future1 = Executors.newFixedThreadPool(2).submit(() -> update(key1, key2)); + Future future2 = Executors.newFixedThreadPool(2).submit(() -> update(key1, key2)); + + future1.get(); + future2.get(); + + assertThat((Long)client.get(null, key1).getValue(BIN_NAME)).isEqualTo(2000); + assertThat((Long)client.get(null, key2).getValue(BIN_NAME)).isEqualTo(2000); + assertThat(exceptionsCount.get()).isGreaterThan(0); + } + + private void update(Key key1, Key key2){ + for(int i = 0; i < 1000; i++){ + try { + incrementBoth(key1, key2, updater, client); + } catch (LockingException e) { + exceptionsCount.incrementAndGet(); + i--; + try { + Thread.sleep(random.nextInt(25)); + } catch (InterruptedException e1) { + throw new RuntimeException(e1); + } + + logger.debug(e.getMessage()); + } + } + } + + public static void incrementBoth(Key key1, Key key2, + ReactorBatchUpdater, AerospikeLock, Value> updater, + AerospikeClient aerospikeClient) { + Long value1 = (Long)getValue(key1, aerospikeClient); + Long value2 = (Long)getValue(key2, aerospikeClient); + + long value1New = (value1 != null ? value1 : 0) + 1; + long value2New = (value2 != null ? value2 : 0) + 1; + updater.update(new AerospikeBasicBatchUpdate( + new AerospikeBasicBatchLocks(asList( + record(key1, value1), + record(key2, value2))), + asList( + record(key1, value1New), + record(key2, value2New)))) + .subscribeOn(Schedulers.parallel()) + .block(); + logger.debug("updated {} from {} to {} and {} from {} to {}", key1, value1, value1New, key2, value2, value2New); + } + + public static Record record(Key key, Long value) { + return new Record(key, singletonList(new Bin(BIN_NAME, value))); + } + + public static Object getValue(Key key, AerospikeClient client){ + com.aerospike.client.Record record1 = client.get(null, key); + return record1 != null ? 
(Long)record1.getValue(BIN_NAME) : null; + } +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/BasicRecoveryTest.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/BasicRecoveryTest.java new file mode 100644 index 0000000..e3f3e23 --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/BasicRecoveryTest.java @@ -0,0 +1,107 @@ +package nosql.batch.update.reactor.aerospike.basic; + +import com.aerospike.client.AerospikeClient; +import com.aerospike.client.Key; +import com.aerospike.client.Value; +import com.aerospike.client.async.NioEventLoops; +import com.aerospike.client.reactor.AerospikeReactorClient; +import com.aerospike.client.reactor.IAerospikeReactorClient; +import nosql.batch.update.RecoveryTest; +import nosql.batch.update.aerospike.AerospikeTestUtils; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.reactor.ReactorBatchOperations; +import nosql.batch.update.reactor.ReactorBatchUpdater; +import nosql.batch.update.reactor.wal.ReactorWriteAheadLogCompleter; +import nosql.batch.update.util.FixedClock; +import nosql.batch.update.wal.CompletionStatistic; +import nosql.batch.update.wal.ExclusiveLocker; +import org.testcontainers.containers.GenericContainer; + +import java.time.Duration; +import java.util.List; +import java.util.concurrent.Executors; + +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.getAerospikeClient; +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.getAerospikeContainer; +import static nosql.batch.update.reactor.aerospike.basic.BasicConsistencyTest.getValue; +import static nosql.batch.update.reactor.aerospike.basic.BasicConsistencyTest.incrementBoth; +import static nosql.batch.update.reactor.aerospike.basic.util.BasicHangingOperationsUtil.hangingOperations; +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; +import static org.awaitility.Duration.ONE_SECOND; + +public class BasicRecoveryTest extends RecoveryTest { + + static final GenericContainer aerospike = getAerospikeContainer(); + + static final NioEventLoops eventLoops = new NioEventLoops(); + static final AerospikeClient client = getAerospikeClient(aerospike, eventLoops); + static final IAerospikeReactorClient reactorClient = new AerospikeReactorClient(client, eventLoops); + + static final FixedClock clock = new FixedClock(); + + static ReactorBatchOperations, AerospikeLock, Value> operations + = hangingOperations(client, reactorClient, clock, hangsAcquire, hangsUpdate, hangsRelease, hangsDeleteBatchInWal); + + static ReactorBatchUpdater, AerospikeLock, Value> updater + = new ReactorBatchUpdater<>(operations); + + public static final Duration STALE_BATCHES_THRESHOLD = Duration.ofSeconds(1); + public static final int BATCH_SIZE = 100; + + static ReactorWriteAheadLogCompleter, AerospikeLock, Value> walCompleter + = new ReactorWriteAheadLogCompleter<>( + operations, STALE_BATCHES_THRESHOLD, BATCH_SIZE, + new DummyExclusiveLocker(), + Executors.newScheduledThreadPool(1)); + + private Key key1; + private Key key2; + + @Override + protected void runUpdate() { + for(int i = 0; i < 10; i++){ + incrementBoth(key1, key2, updater, client); + } + } + + @Override + protected CompletionStatistic runCompleter(){ + 
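+        // Advance the fixed clock past STALE_BATCHES_THRESHOLD so the batches left
+        // hanging by runUpdate() become visible to the WAL completer, run one
+        // completion pass, and return the resulting CompletionStatistic to the base RecoveryTest.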
clock.setTime(STALE_BATCHES_THRESHOLD.toMillis() + 1); + return walCompleter.completeHangedTransactions(); + } + + @Override + protected void checkForConsistency() { + assertThat(getValue(key1, client)).isEqualTo(getValue(key2, client)); + + await().timeout(ONE_SECOND).untilAsserted(() -> + assertThat(operations.getWriteAheadLogManager().getTimeRanges(STALE_BATCHES_THRESHOLD, BATCH_SIZE)).isEmpty()); + } + + private int setNameCounter = 0; + @Override + protected void cleanUp() { + String setName = String.valueOf(setNameCounter++); + key1 = new Key(AerospikeTestUtils.AEROSPIKE_PROPERTIES.getNamespace(), setName, 0); + key2 = new Key(AerospikeTestUtils.AEROSPIKE_PROPERTIES.getNamespace(), setName, 1); + + clock.setTime(0); + } + + static class DummyExclusiveLocker implements ExclusiveLocker{ + + @Override + public boolean acquire() { + return true; + } + + @Override + public void release() {} + + @Override + public void shutdown() {} + } +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/lock/AerospikeBasicFailingReactorLockOperations.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/lock/AerospikeBasicFailingReactorLockOperations.java new file mode 100644 index 0000000..a83a952 --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/lock/AerospikeBasicFailingReactorLockOperations.java @@ -0,0 +1,84 @@ +package nosql.batch.update.reactor.aerospike.basic.lock; + +import com.aerospike.client.Value; +import com.aerospike.client.reactor.IAerospikeReactorClient; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.reactor.aerospike.lock.AerospikeReactorExpectedValuesOperations; +import nosql.batch.update.reactor.aerospike.lock.AerospikeReactorLockOperations; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.Exceptions; +import reactor.core.publisher.Mono; + +import java.util.Collection; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static nosql.batch.update.util.HangingUtil.selectFlaking; + + +public class AerospikeBasicFailingReactorLockOperations + extends AerospikeReactorLockOperations> { + + private static final Logger logger = LoggerFactory.getLogger(AerospikeBasicFailingReactorLockOperations.class); + + private final AtomicReference failsAcquire; + private final AtomicReference failsCheckValue; + private final AtomicBoolean failsRelease; + + public AerospikeBasicFailingReactorLockOperations(IAerospikeReactorClient reactorClient, + AerospikeReactorExpectedValuesOperations> expectedValuesOperations, + AtomicReference failsAcquire, + AtomicReference failsCheckValue, + AtomicBoolean failsRelease) { + super(reactorClient, expectedValuesOperations); + this.failsAcquire = failsAcquire; + this.failsCheckValue = failsCheckValue; + this.failsRelease = failsRelease; + } + + @Override + protected Mono> putLocks( + Value batchId, + AerospikeBasicBatchLocks batchLocks, + boolean checkTransactionId) { + Throwable throwable = failsAcquire.get(); + if(throwable != null){ + List recordsSelected = selectFlaking(batchLocks.expectedValues(), + key -> logger.info("acquire locks failed flaking for key [{}]", key)); + + return super.putLocks(batchId, + new 
AerospikeBasicBatchLocks(recordsSelected), + checkTransactionId) + .then(Mono.error(Exceptions.propagate(throwable))); + } else { + return super.putLocks(batchId, batchLocks, checkTransactionId); + } + } + + @Override + protected Mono checkExpectedValues(AerospikeBasicBatchLocks batchLocks, List keysLocked) { + Throwable throwable = failsCheckValue.get(); + if(throwable != null){ + return Mono.error(throwable); + } else { + return super.checkExpectedValues(batchLocks, keysLocked); + } + } + + @Override + public Mono release(Collection locks, Value batchId) { + if(failsRelease.get()){ + Collection partialLocks = selectFlaking(locks, + key -> logger.info("release locks failed flaking for key [{}]", key)); + return super.release(partialLocks, batchId) + .then(Mono.error(new RuntimeException())); + } else { + return super.release(locks, batchId); + } + } + +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/lock/AerospikeBasicHangingLockOperations.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/lock/AerospikeBasicHangingLockOperations.java new file mode 100644 index 0000000..7c0ebbd --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/lock/AerospikeBasicHangingLockOperations.java @@ -0,0 +1,49 @@ +package nosql.batch.update.reactor.aerospike.basic.lock; + +import com.aerospike.client.Value; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.reactor.lock.ReactorHangingLockOperations; +import nosql.batch.update.reactor.lock.ReactorLockOperations; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Collection; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static nosql.batch.update.util.HangingUtil.selectFlaking; + + +public class AerospikeBasicHangingLockOperations + extends ReactorHangingLockOperations { + + private static final Logger logger = LoggerFactory.getLogger(AerospikeBasicHangingLockOperations.class); + + private AerospikeBasicHangingLockOperations(ReactorLockOperations lockOperations, + AtomicBoolean failsAcquire, AtomicBoolean failsRelease) { + super(lockOperations, failsAcquire, failsRelease); + } + + public static AerospikeBasicHangingLockOperations hangingLocks(ReactorLockOperations lockOperations, + AtomicBoolean failsAcquire, AtomicBoolean failsRelease){ + return new AerospikeBasicHangingLockOperations(lockOperations, failsAcquire, failsRelease); + } + + @Override + protected AerospikeBasicBatchLocks selectFlakingToAcquire(AerospikeBasicBatchLocks aerospikeBasicBatchLocks) { + List recordsSelected = selectFlaking(aerospikeBasicBatchLocks.expectedValues(), + key -> logger.info("acquire locks failed flaking for key [{}]", key)); + + return new AerospikeBasicBatchLocks(recordsSelected); + } + + @Override + protected Collection selectFlakingToRelease(Collection locks) { + return selectFlaking(locks, + key -> logger.info("release locks failed flaking for key [{}]", key)); + } + + +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/lock/AerospikeBasicLockOperationsTest.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/lock/AerospikeBasicLockOperationsTest.java new file mode 100644 index 0000000..2a73d79 --- 
/dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/lock/AerospikeBasicLockOperationsTest.java @@ -0,0 +1,69 @@ +package nosql.batch.update.reactor.aerospike.basic.lock; + +import com.aerospike.client.AerospikeClient; +import com.aerospike.client.Key; +import com.aerospike.client.Value; +import com.aerospike.client.async.NioEventLoops; +import com.aerospike.client.reactor.AerospikeReactorClient; +import com.aerospike.client.reactor.IAerospikeReactorClient; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.aerospike.wal.AerospikeWriteAheadLogManager; +import nosql.batch.update.reactor.lock.ReactorLockOperationsTest; +import org.jetbrains.annotations.NotNull; +import org.testcontainers.containers.GenericContainer; + +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import static java.util.Arrays.asList; +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES; +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.getAerospikeClient; +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.getAerospikeContainer; +import static nosql.batch.update.reactor.aerospike.basic.AerospikeBasicBatchUpdater.basicLockOperations; +import static nosql.batch.update.reactor.aerospike.basic.BasicConsistencyTest.record; +import static org.assertj.core.api.Assertions.assertThat; + +public class AerospikeBasicLockOperationsTest + extends ReactorLockOperationsTest { + + static final GenericContainer aerospike = getAerospikeContainer(); + + static final NioEventLoops eventLoops = new NioEventLoops(); + static final AerospikeClient client = getAerospikeClient(aerospike, eventLoops); + static final IAerospikeReactorClient reactorClient = new AerospikeReactorClient(client, eventLoops); + + static String setName = String.valueOf(AerospikeBasicLockOperationsTest.class.hashCode()); + static AtomicInteger keyCounter = new AtomicInteger(); + private Key key1 = new Key(AEROSPIKE_PROPERTIES.getNamespace(), setName, keyCounter.incrementAndGet()); + private Key key2 = new Key(AEROSPIKE_PROPERTIES.getNamespace(), setName, keyCounter.incrementAndGet()); + + + AerospikeBasicBatchLocks locks1 = new AerospikeBasicBatchLocks(asList(record(key1, null), record(key2, null))); + + public AerospikeBasicLockOperationsTest() { + super(basicLockOperations(reactorClient)); + } + + @Override + protected AerospikeBasicBatchLocks getLocks1() { + return locks1; + } + + @Override + protected Value generateBatchId() { + return AerospikeWriteAheadLogManager.generateBatchId(); + } + + @Override + protected void assertThatSameLockKeys(List locks1, List locks2) { + assertThat(toKeys(locks1)).containsExactlyInAnyOrderElementsOf(toKeys(locks2)); + } + + @NotNull + private Set toKeys(List locks1) { + return locks1.stream().map(l -> l.key).collect(Collectors.toSet()); + } +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/util/BasicFailingOperationsUtil.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/util/BasicFailingOperationsUtil.java new file mode 100644 index 0000000..e544f66 --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/util/BasicFailingOperationsUtil.java @@ -0,0 +1,55 @@ 
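For orientation before the failing/hanging test fixtures that follow, here is a minimal, illustrative usage sketch (not part of this changeset) showing how the pieces added above fit together for a plain two-record batch update. Host/port, the "test" namespace, the "wal" set, the "value" bin and the assumed current value 0 are placeholders, and var is used because the exact generic signatures are not legible in this diff.

import com.aerospike.client.AerospikeClient;
import com.aerospike.client.Bin;
import com.aerospike.client.Key;
import com.aerospike.client.async.NioEventLoops;
import com.aerospike.client.policy.ClientPolicy;
import com.aerospike.client.reactor.AerospikeReactorClient;
import nosql.batch.update.aerospike.basic.AerospikeBasicBatchUpdate;
import nosql.batch.update.aerospike.basic.Record;
import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks;
import nosql.batch.update.reactor.ReactorBatchUpdater;

import java.time.Clock;

import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import static nosql.batch.update.reactor.aerospike.basic.AerospikeBasicBatchUpdater.basicOperations;

public class BasicReactorUpdaterSketch {

    public static void main(String[] args) {
        // Event loops are shared between the sync client and the reactor client,
        // mirroring AerospikeTestUtils above. "localhost"/3000 are placeholders.
        NioEventLoops eventLoops = new NioEventLoops();
        ClientPolicy clientPolicy = new ClientPolicy();
        clientPolicy.eventLoops = eventLoops;
        AerospikeClient client = new AerospikeClient(clientPolicy, "localhost", 3000);
        AerospikeReactorClient reactorClient = new AerospikeReactorClient(client, eventLoops);

        // WAL records go to namespace "test", set "wal" (placeholders).
        var operations = basicOperations(client, reactorClient, "test", "wal", Clock.systemUTC());
        var updater = new ReactorBatchUpdater<>(operations);

        Key key1 = new Key("test", "demo", 1);
        Key key2 = new Key("test", "demo", 2);

        // The locks carry the expected current values (assumed 0 here); the second
        // list holds the new values to write. If an expected-value check or lock
        // acquisition fails, nothing is written.
        updater.update(new AerospikeBasicBatchUpdate(
                new AerospikeBasicBatchLocks(asList(
                        new Record(key1, singletonList(new Bin("value", 0L))),
                        new Record(key2, singletonList(new Bin("value", 0L))))),
                asList(
                        new Record(key1, singletonList(new Bin("value", 1L))),
                        new Record(key2, singletonList(new Bin("value", 1L))))))
                .block();

        // Cleanup of reactorClient/client/eventLoops omitted for brevity.
    }
}

Concurrent updaters touching the same keys fail fast with a locking exception and can simply retry, as exercised by BasicConsistencyTest above, while batches that die mid-flight are finished later by the write-ahead-log completer.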
+package nosql.batch.update.reactor.aerospike.basic.util; + +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.Value; +import com.aerospike.client.reactor.IAerospikeReactorClient; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.reactor.ReactorBatchOperations; +import nosql.batch.update.reactor.ReactorUpdateOperations; +import nosql.batch.update.reactor.aerospike.basic.AerospikeBasicReactorExpectedValueOperations; +import nosql.batch.update.reactor.aerospike.basic.AerospikeBasicReactorUpdateOperations; +import nosql.batch.update.reactor.aerospike.basic.lock.AerospikeBasicFailingReactorLockOperations; +import nosql.batch.update.reactor.lock.ReactorLockOperations; +import nosql.batch.update.reactor.wal.ReactorWriteAheadLogManager; + +import java.time.Clock; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES; +import static nosql.batch.update.reactor.aerospike.basic.AerospikeBasicBatchUpdater.basicWalManager; +import static nosql.batch.update.reactor.aerospike.basic.AerospikeBasicFailingUpdateOperations.failingUpdates; +import static nosql.batch.update.reactor.aerospike.wal.AerospikeFailingWriteAheadLogManager.failingWal; + +public class BasicFailingOperationsUtil { + + public static ReactorBatchOperations, AerospikeLock, Value> failingOperations( + IAerospikeClient client, IAerospikeReactorClient reactorClient, + Clock clock, + AtomicReference failsAcquire, + AtomicReference failsCheckValue, + AtomicBoolean failsUpdate, + AtomicBoolean failsRelease, + AtomicBoolean failsDeleteWal, + AtomicInteger deletesInProcess){ + + ReactorLockOperations lockOperations + = new AerospikeBasicFailingReactorLockOperations(reactorClient, + new AerospikeBasicReactorExpectedValueOperations(reactorClient), + failsAcquire, failsCheckValue, failsRelease); + + ReactorUpdateOperations> updateOperations = + failingUpdates(new AerospikeBasicReactorUpdateOperations(reactorClient), failsUpdate); + + ReactorWriteAheadLogManager, Value> walManager + = failingWal(basicWalManager(client, reactorClient, AEROSPIKE_PROPERTIES.getNamespace(), "wal", clock), + failsDeleteWal, deletesInProcess); + + return new ReactorBatchOperations<>(walManager, lockOperations, updateOperations); + } + +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/util/BasicHangingOperationsUtil.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/util/BasicHangingOperationsUtil.java new file mode 100644 index 0000000..b99a566 --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/util/BasicHangingOperationsUtil.java @@ -0,0 +1,50 @@ +package nosql.batch.update.reactor.aerospike.basic.util; + +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.Value; +import com.aerospike.client.reactor.IAerospikeReactorClient; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.reactor.ReactorBatchOperations; +import 
nosql.batch.update.reactor.ReactorUpdateOperations; +import nosql.batch.update.reactor.aerospike.basic.AerospikeBasicReactorUpdateOperations; +import nosql.batch.update.reactor.lock.ReactorLockOperations; +import nosql.batch.update.reactor.wal.ReactorWriteAheadLogManager; + +import java.time.Clock; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES; +import static nosql.batch.update.reactor.aerospike.basic.AerospikeBasicBatchUpdater.basicLockOperations; +import static nosql.batch.update.reactor.aerospike.basic.AerospikeBasicBatchUpdater.basicWalManager; +import static nosql.batch.update.reactor.aerospike.basic.AerospikeBasicHangingUpdateOperations.hangingUpdates; +import static nosql.batch.update.reactor.aerospike.basic.lock.AerospikeBasicHangingLockOperations.hangingLocks; +import static nosql.batch.update.reactor.aerospike.wal.AerospikeHangingWriteAheadLogManager.hangingWal; + +public class BasicHangingOperationsUtil { + + public static ReactorBatchOperations, AerospikeLock, Value> hangingOperations( + IAerospikeClient client, IAerospikeReactorClient reactorClient, + Clock clock, + AtomicBoolean hangsAcquire, + AtomicBoolean hangsUpdate, + AtomicBoolean hangsRelease, + AtomicBoolean hangsDeleteWal){ + + ReactorLockOperations lockOperations + = hangingLocks(basicLockOperations(reactorClient), + hangsAcquire, hangsRelease); + + ReactorUpdateOperations> updateOperations = + hangingUpdates(new AerospikeBasicReactorUpdateOperations(reactorClient), hangsUpdate); + + ReactorWriteAheadLogManager, Value> walManager + = hangingWal(basicWalManager(client, reactorClient, AEROSPIKE_PROPERTIES.getNamespace(), "wal", clock), + hangsDeleteWal); + + return new ReactorBatchOperations<>(walManager, lockOperations, updateOperations); + } + +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/util/BasicOperationsUtil.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/util/BasicOperationsUtil.java new file mode 100644 index 0000000..65e1d25 --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/basic/util/BasicOperationsUtil.java @@ -0,0 +1,27 @@ +package nosql.batch.update.reactor.aerospike.basic.util; + +import com.aerospike.client.AerospikeClient; +import com.aerospike.client.Value; +import com.aerospike.client.reactor.IAerospikeReactorClient; +import nosql.batch.update.aerospike.basic.Record; +import nosql.batch.update.aerospike.basic.lock.AerospikeBasicBatchLocks; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.reactor.ReactorBatchOperations; + +import java.time.Clock; +import java.util.List; + +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES; +import static nosql.batch.update.reactor.aerospike.basic.AerospikeBasicBatchUpdater.basicOperations; + +public class BasicOperationsUtil { + + public static ReactorBatchOperations, AerospikeLock, Value> getBasicOperations( + AerospikeClient client, IAerospikeReactorClient reactorClient, Clock clock) { + return basicOperations( + client, reactorClient, + AEROSPIKE_PROPERTIES.getNamespace(), "wal", + clock); + } + +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/lock/AerospikeReactorLockOperationsTest.java 
b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/lock/AerospikeReactorLockOperationsTest.java new file mode 100644 index 0000000..0362eba --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/lock/AerospikeReactorLockOperationsTest.java @@ -0,0 +1,62 @@ +package nosql.batch.update.reactor.aerospike.lock; + + +import com.aerospike.client.Key; +import nosql.batch.update.aerospike.lock.AerospikeLock; +import nosql.batch.update.lock.Lock; +import nosql.batch.update.lock.PermanentLockingException; +import nosql.batch.update.lock.TemporaryLockingException; +import nosql.batch.update.reactor.aerospike.lock.AerospikeReactorLockOperations.LockResult; +import org.junit.Test; + +import java.net.SocketTimeoutException; +import java.util.Arrays; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +public class AerospikeReactorLockOperationsTest { + + @Test + public void shouldSuccess(){ + + Key key1 = new Key("ns", "set", "1"); + Key key2 = new Key("ns", "set", "2"); + + List> lockResults = Arrays.asList( + new LockResult<>(new AerospikeLock(Lock.LockType.LOCKED, key1)), + new LockResult<>(new AerospikeLock(Lock.LockType.SAME_BATCH, key2))); + + List locked = AerospikeReactorLockOperations.processResults(lockResults).block(); + assertThat(locked).containsExactly( + new AerospikeLock(Lock.LockType.LOCKED, key1), + new AerospikeLock(Lock.LockType.SAME_BATCH, key2)); + } + + @Test(expected = TemporaryLockingException.class) + public void shouldFail(){ + + Key keyLocked = new Key("ns", "set", "1"); + + List> lockResults = Arrays.asList( + new LockResult<>(new AerospikeLock(Lock.LockType.LOCKED, keyLocked)), + new LockResult<>(new TemporaryLockingException("test"))); + + AerospikeReactorLockOperations.processResults(lockResults).block(); + } + + @Test(expected = RuntimeException.class) + public void shouldSelectNonLockingError(){ + + Key keyLocked = new Key("ns", "set", "1"); + + List> lockResults = Arrays.asList( + new LockResult<>(new AerospikeLock(Lock.LockType.LOCKED, keyLocked)), + new LockResult<>(new TemporaryLockingException("test")), + new LockResult<>(new SocketTimeoutException("test"))); + + AerospikeReactorLockOperations.processResults(lockResults).block(); + } + + +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/wal/AerospikeFailingWriteAheadLogManager.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/wal/AerospikeFailingWriteAheadLogManager.java new file mode 100644 index 0000000..fa4cc1e --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/wal/AerospikeFailingWriteAheadLogManager.java @@ -0,0 +1,25 @@ +package nosql.batch.update.reactor.aerospike.wal; + +import com.aerospike.client.Value; +import nosql.batch.update.aerospike.lock.AerospikeBatchLocks; +import nosql.batch.update.reactor.wal.ReactorFailingWriteAheadLogManager; +import nosql.batch.update.reactor.wal.ReactorWriteAheadLogManager; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +public class AerospikeFailingWriteAheadLogManager, UPDATES, EV> + extends ReactorFailingWriteAheadLogManager { + + public AerospikeFailingWriteAheadLogManager(ReactorWriteAheadLogManager writeAheadLogManager, + AtomicBoolean failsDelete, AtomicInteger deletesInProcess) { + super(writeAheadLogManager, failsDelete, deletesInProcess); + } + + public 
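The three tests above pin down how per-key lock results are folded into one outcome: when every key is acquired the locks are returned, a locking conflict surfaces as a retryable TemporaryLockingException, and any non-locking error (such as a socket timeout) takes precedence and is rethrown as a plain RuntimeException. A simplified, standalone sketch of that aggregation rule; KeyOutcome is a placeholder for the library's LockResult type, while the exceptions are the real ones introduced in this change.

```java
import nosql.batch.update.lock.TemporaryLockingException;

import java.util.ArrayList;
import java.util.List;

// Standalone sketch of the aggregation rule exercised by the tests above.
public class LockResultAggregationSketch {

    public static final class KeyOutcome {
        final String lockedKey;   // non-null when the key was locked successfully
        final Throwable error;    // non-null when locking this key failed
        final boolean retryable;  // true for a locking conflict (another batch holds the key)

        KeyOutcome(String lockedKey) { this.lockedKey = lockedKey; this.error = null; this.retryable = false; }
        KeyOutcome(Throwable error, boolean retryable) { this.lockedKey = null; this.error = error; this.retryable = retryable; }
    }

    /** Returns all locked keys, or throws: a non-locking error wins over a locking conflict. */
    public static List<String> process(List<KeyOutcome> outcomes) {
        Throwable lockingConflict = null;
        List<String> locked = new ArrayList<>();
        for (KeyOutcome outcome : outcomes) {
            if (outcome.error == null) {
                locked.add(outcome.lockedKey);
            } else if (!outcome.retryable) {
                throw new RuntimeException(outcome.error);   // unexpected failure: surface it as-is
            } else {
                lockingConflict = outcome.error;             // remember, but keep scanning for worse errors
            }
        }
        if (lockingConflict != null) {
            throw new TemporaryLockingException("some keys are locked by another batch");
        }
        return locked;
    }
}
```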
static , UPDATES, EV> + AerospikeFailingWriteAheadLogManager failingWal( + AerospikeReactorWriteAheadLogManager writeAheadLogManager, + AtomicBoolean failsDelete, AtomicInteger deletesInProcess){ + return new AerospikeFailingWriteAheadLogManager<>(writeAheadLogManager, failsDelete, deletesInProcess); + } +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/wal/AerospikeHangingWriteAheadLogManager.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/wal/AerospikeHangingWriteAheadLogManager.java new file mode 100644 index 0000000..94e2dc3 --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/wal/AerospikeHangingWriteAheadLogManager.java @@ -0,0 +1,22 @@ +package nosql.batch.update.reactor.aerospike.wal; + +import com.aerospike.client.Value; +import nosql.batch.update.aerospike.lock.AerospikeBatchLocks; +import nosql.batch.update.reactor.wal.ReactorHangingWriteAheadLogManager; +import nosql.batch.update.reactor.wal.ReactorWriteAheadLogManager; + +import java.util.concurrent.atomic.AtomicBoolean; + +public class AerospikeHangingWriteAheadLogManager, UPDATES, EV> + extends ReactorHangingWriteAheadLogManager { + + public AerospikeHangingWriteAheadLogManager(ReactorWriteAheadLogManager writeAheadLogManager, AtomicBoolean failsDelete) { + super(writeAheadLogManager, failsDelete); + } + + public static , UPDATES, EV> + AerospikeHangingWriteAheadLogManager hangingWal( + AerospikeReactorWriteAheadLogManager writeAheadLogManager, AtomicBoolean failsDelete){ + return new AerospikeHangingWriteAheadLogManager<>(writeAheadLogManager, failsDelete); + } +} diff --git a/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/wal/AerospikeReactorWriteAheadLogManagerTest.java b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/wal/AerospikeReactorWriteAheadLogManagerTest.java new file mode 100644 index 0000000..bfc4b5b --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/java/nosql/batch/update/reactor/aerospike/wal/AerospikeReactorWriteAheadLogManagerTest.java @@ -0,0 +1,96 @@ +package nosql.batch.update.reactor.aerospike.wal; + +import com.aerospike.client.AerospikeClient; +import com.aerospike.client.Bin; +import com.aerospike.client.Value; +import com.aerospike.client.async.NioEventLoops; +import com.aerospike.client.reactor.AerospikeReactorClient; +import com.aerospike.client.reactor.IAerospikeReactorClient; +import nosql.batch.update.BatchUpdate; +import nosql.batch.update.aerospike.lock.AerospikeBatchLocks; +import nosql.batch.update.aerospike.wal.AerospikeBatchUpdateSerde; +import nosql.batch.update.util.FixedClock; +import nosql.batch.update.wal.WriteAheadLogManagerTest; +import org.testcontainers.containers.GenericContainer; + +import java.time.Duration; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static java.util.Collections.emptyList; +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.AEROSPIKE_PROPERTIES; +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.getAerospikeClient; +import static nosql.batch.update.reactor.aerospike.AerospikeTestUtils.getAerospikeContainer; + +public class AerospikeReactorWriteAheadLogManagerTest extends WriteAheadLogManagerTest { + + static final GenericContainer aerospike = getAerospikeContainer(); + + static final NioEventLoops eventLoops = new 
NioEventLoops(); + static final AerospikeClient client = getAerospikeClient(aerospike, eventLoops); + private static final IAerospikeReactorClient reactorClient = new AerospikeReactorClient(client, eventLoops); + + static final FixedClock clock = new FixedClock(); + static { + clock.setTime(1000); + } + static final Duration staleThreshold = Duration.ofMillis(100); + static final int batchSize = 100; + + static String walSetName = String.valueOf(AerospikeReactorWriteAheadLogManagerTest.class.hashCode()); + + private static AerospikeReactorWriteAheadLogManager, Object, Object> writeAheadLogManager + = new AerospikeReactorWriteAheadLogManager<>( + client, reactorClient, AEROSPIKE_PROPERTIES.getNamespace(), walSetName, + new AerospikeBatchUpdateSerde, Object, Object>(){ + @Override + public List write(BatchUpdate batch) { + return emptyList(); + } + @Override + public BatchUpdate read(Map bins) { + return null; + } + }, + clock); + + + + @Override + protected Value saveBatch() { + return writeAheadLogManager.writeBatch( + new BatchUpdate, Object>() { + @Override + public AerospikeBatchLocks locks() { + return null; + } + + @Override + public Object updates() { + return null; + } + }).block(); + } + + @Override + protected boolean removeBatch(Value batchId) { + return writeAheadLogManager.deleteBatch(batchId).block(); + } + + @Override + protected void switchClockAhead() { + clock.setTime(clock.millis() + staleThreshold.toMillis() + 1); + } + + @Override + protected List getStaleBatches() { + return writeAheadLogManager.getTimeRanges(staleThreshold, batchSize).stream() + .map(writeAheadLogManager::getStaleBatchesForRange) + .flatMap(Collection::stream) + .map(record -> record.batchId) + .collect(Collectors.toList()); + } + +} diff --git a/aerospike-reactor-batch-updater/src/test/resources/log4j2.xml b/aerospike-reactor-batch-updater/src/test/resources/log4j2.xml new file mode 100644 index 0000000..edfd048 --- /dev/null +++ b/aerospike-reactor-batch-updater/src/test/resources/log4j2.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/batch-updater/pom.xml b/batch-updater/pom.xml index 82fec7a..518f31a 100644 --- a/batch-updater/pom.xml +++ b/batch-updater/pom.xml @@ -18,27 +18,29 @@ 4.0.0 - io.kptfh.nosql + com.playtika.nosql batch-updater-parent - 0.0.1 + 0.0.22 batch-updater jar Batch updates on NoSql DBs - - ${project.basedir}/.. 
- - + + io.projectreactor + reactor-core + + org.slf4j slf4j-api + junit junit @@ -57,6 +59,11 @@ test - + + org.awaitility + awaitility + test + + \ No newline at end of file diff --git a/batch-updater/src/main/java/nosql/batch/update/BatchOperations.java b/batch-updater/src/main/java/nosql/batch/update/BatchOperations.java index 3435993..ae49014 100644 --- a/batch-updater/src/main/java/nosql/batch/update/BatchOperations.java +++ b/batch-updater/src/main/java/nosql/batch/update/BatchOperations.java @@ -2,46 +2,77 @@ import nosql.batch.update.lock.Lock; import nosql.batch.update.lock.LockOperations; -import nosql.batch.update.wal.TransactionId; +import nosql.batch.update.lock.LockingException; import nosql.batch.update.wal.WriteAheadLogManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import java.util.Collection; import java.util.List; -import java.util.Set; +import java.util.concurrent.ExecutorService; -public class BatchOperations { +public class BatchOperations { - private final WriteAheadLogManager writeAheadLogManager; - private final LockOperations lockOperations; - private final UpdateOperations updateOperations; + private static final Logger logger = LoggerFactory.getLogger(BatchOperations.class); - public BatchOperations(WriteAheadLogManager writeAheadLogManager, - LockOperations lockOperations, - UpdateOperations updateOperations) { + private final WriteAheadLogManager writeAheadLogManager; + private final LockOperations lockOperations; + private final UpdateOperations updateOperations; + private final ExecutorService executorService; + + public BatchOperations(WriteAheadLogManager writeAheadLogManager, + LockOperations lockOperations, + UpdateOperations updateOperations, + ExecutorService executorService) { this.writeAheadLogManager = writeAheadLogManager; this.lockOperations = lockOperations; this.updateOperations = updateOperations; + this.executorService = executorService; } - public void processAndDeleteTransaction(TransactionId transactionId, BatchUpdate batchUpdate, boolean checkTransactionId) { - Set locked = lockOperations.acquire(transactionId, batchUpdate.locks(), checkTransactionId, - locksToRelease -> releaseLocksAndDeleteWalTransaction(locksToRelease, transactionId)); + public void processAndDeleteTransaction(BATCH_ID batchId, BatchUpdate batchUpdate, boolean calledByWal) { + List locksAcquired; + try { + locksAcquired = lockOperations.acquire(batchId, batchUpdate.locks(), calledByWal); + } catch (LockingException lockingException) { + if (logger.isTraceEnabled()) { + logger.trace("Failed to acquire locks [{}] batchId=[{}]. 
Will release locks", batchId, batchUpdate.locks()); + } + releaseLocksAndDeleteWalTransactionOnError(batchUpdate.locks(), batchId); + throw lockingException; + } + + updateOperations.updateMany(batchUpdate.updates(), calledByWal); + + if(logger.isTraceEnabled()){ + logger.trace("Applied updates [{}] batchId=[{}]", batchId, batchUpdate); + } - updateOperations.updateMany(batchUpdate.updates()); - releaseLocksAndDeleteWalTransaction(locked, transactionId); + releaseLocksAndDeleteWalTransaction(locksAcquired, batchId); } - private void releaseLocksAndDeleteWalTransaction(Collection locks, TransactionId transactionId) { - lockOperations.release(locks); - writeAheadLogManager.deleteTransaction(transactionId); + private void releaseLocksAndDeleteWalTransaction(List locks, BATCH_ID batchId) { + lockOperations.release(locks, batchId); + //here we use fire&forget to reduce response time + executorService.submit(() -> { + try { + boolean deleted = writeAheadLogManager.deleteBatch(batchId); + if(deleted) { + logger.trace("Removed batch from WAL: {}", batchId); + } else { + logger.error("Missed batch in WAL: {}", batchId); + } + } catch (Throwable t) { + logger.error("Error while removing batch from WAL", t); + } + }); } - public void releaseLocksAndDeleteWalTransactionOnError(L locks, TransactionId transactionId) { - List transactionLockKeys = lockOperations.getLockedByTransaction(locks, transactionId); - releaseLocksAndDeleteWalTransaction(transactionLockKeys, transactionId); + public void releaseLocksAndDeleteWalTransactionOnError(LOCKS locks, BATCH_ID batchId) { + List transactionLockKeys = lockOperations.getLockedByBatchUpdate(locks, batchId); + releaseLocksAndDeleteWalTransaction(transactionLockKeys, batchId); } - public WriteAheadLogManager getWriteAheadLogManager() { + public WriteAheadLogManager getWriteAheadLogManager() { return writeAheadLogManager; } diff --git a/batch-updater/src/main/java/nosql/batch/update/BatchUpdate.java b/batch-updater/src/main/java/nosql/batch/update/BatchUpdate.java index d6fc587..f568ec7 100644 --- a/batch-updater/src/main/java/nosql/batch/update/BatchUpdate.java +++ b/batch-updater/src/main/java/nosql/batch/update/BatchUpdate.java @@ -1,11 +1,11 @@ package nosql.batch.update; -public interface BatchUpdate { +public interface BatchUpdate { /** * @return Locks that should be acquired before applying batch of updated. - * After locks acquired we should check for expected values + * After locks acquired we should check for expected values if post lock approach used */ - L locks(); - U updates(); + LOCKS locks(); + UPDATES updates(); } diff --git a/batch-updater/src/main/java/nosql/batch/update/BatchUpdater.java b/batch-updater/src/main/java/nosql/batch/update/BatchUpdater.java index 8e152f6..593ffad 100644 --- a/batch-updater/src/main/java/nosql/batch/update/BatchUpdater.java +++ b/batch-updater/src/main/java/nosql/batch/update/BatchUpdater.java @@ -1,26 +1,50 @@ package nosql.batch.update; -import nosql.batch.update.wal.TransactionId; +import nosql.batch.update.lock.Lock; import nosql.batch.update.wal.WriteAheadLogManager; - -import java.util.Collection; -import java.util.List; -import java.util.Set; - -public class BatchUpdater { - - private final WriteAheadLogManager writeAheadLogManager; - private BatchOperations batchOperations; - - public BatchUpdater(BatchOperations batchOperations) { +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Used to run batch updates on NoSql storage. 
Initially it was developed for Aerospike but may be implemented for any. + * Updates should be idempotent so WriteAheadLogCompleter can safely complete interrupted batch + * There is 2 approaches in batch updates PRE_LOCK and POST_LOCK + * + * PRE_LOCK - used if you know in advance all records (keys) that should be updated + * It takes the following steps to complete batch update + * 1) Lock keys + * 2) Apply updates + * 3) Unlock keys + * + * POST_LOCK - used if you don't know in advance all records (keys) that should be updated. + * It takes the following steps to complete batch update + * 1) Prepare updates + * 2) Lock keys + * 3) Check expected values (to guarantee that no concurrent changes while running updates and acquiring locks) + * 4) Apply updates + * 5) Unlock keys + * + * @param + * @param + * @param + * @param + */ +public class BatchUpdater { + + private static final Logger logger = LoggerFactory.getLogger(BatchUpdater.class); + + private final WriteAheadLogManager writeAheadLogManager; + private BatchOperations batchOperations; + + public BatchUpdater(BatchOperations batchOperations) { this.batchOperations = batchOperations; this.writeAheadLogManager = batchOperations.getWriteAheadLogManager(); } - public void update(BatchUpdate batchUpdate) { - TransactionId transactionId = writeAheadLogManager.writeTransaction(batchUpdate); - - batchOperations.processAndDeleteTransaction(transactionId, batchUpdate, false); + public void update(BatchUpdate batchUpdate) { + BATCH_ID batchId = writeAheadLogManager.writeBatch(batchUpdate); + logger.trace("Added batch to WAL: {}", batchId); + batchOperations.processAndDeleteTransaction(batchId, batchUpdate, false); } } diff --git a/batch-updater/src/main/java/nosql/batch/update/UpdateOperations.java b/batch-updater/src/main/java/nosql/batch/update/UpdateOperations.java index df4a137..b2cc319 100644 --- a/batch-updater/src/main/java/nosql/batch/update/UpdateOperations.java +++ b/batch-updater/src/main/java/nosql/batch/update/UpdateOperations.java @@ -1,6 +1,6 @@ package nosql.batch.update; -public interface UpdateOperations { +public interface UpdateOperations { - void updateMany(U batchOfUpdates); + void updateMany(UPDATES batchOfUpdates, boolean calledByWal); } diff --git a/batch-updater/src/main/java/nosql/batch/update/lock/FailedToAcquireAllLocksException.java b/batch-updater/src/main/java/nosql/batch/update/lock/FailedToAcquireAllLocksException.java deleted file mode 100644 index c37eb36..0000000 --- a/batch-updater/src/main/java/nosql/batch/update/lock/FailedToAcquireAllLocksException.java +++ /dev/null @@ -1,4 +0,0 @@ -package nosql.batch.update.lock; - -public class FailedToAcquireAllLocksException extends RuntimeException{ -} diff --git a/batch-updater/src/main/java/nosql/batch/update/lock/Lock.java b/batch-updater/src/main/java/nosql/batch/update/lock/Lock.java index 6146524..43f56fd 100644 --- a/batch-updater/src/main/java/nosql/batch/update/lock/Lock.java +++ b/batch-updater/src/main/java/nosql/batch/update/lock/Lock.java @@ -8,12 +8,12 @@ protected Lock(LockType lockType) { this.lockType = lockType; } - enum LockType { - /*Locked in scope of the transaction*/ + public enum LockType { + /*Locked in scope of the batch update*/ LOCKED, - /**Signals that was already locked in scope of interrupted transaction. - Used only by WriteAheadLogCompleter*/ - SAME_TRANSACTION + /**Signals that was already locked in scope of interrupted batch update. 
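A standalone illustration of the PRE_LOCK and POST_LOCK orderings described in the BatchUpdater javadoc above. In-memory maps and JVM locks stand in for the database records and lock operations; the class and method names are illustrative only.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;

public class LockOrderingSketch {

    private final Map<String, ReentrantLock> locks = new ConcurrentHashMap<>();
    private final Map<String, String> store = new ConcurrentHashMap<>();

    // PRE_LOCK: all keys are known up front -> 1) lock keys, 2) apply updates, 3) unlock keys.
    void preLock(List<String> keys, Map<String, String> updates) {
        keys.forEach(k -> locks.computeIfAbsent(k, x -> new ReentrantLock()).lock());
        try {
            store.putAll(updates);
        } finally {
            keys.forEach(k -> locks.get(k).unlock());
        }
    }

    // POST_LOCK: 1) prepare updates, 2) lock keys, 3) re-check expected values to detect
    // concurrent changes, 4) apply updates, 5) unlock keys.
    void postLock(Map<String, String> preparedUpdates, Map<String, String> expectedValues) {
        List<String> keys = new ArrayList<>(preparedUpdates.keySet());
        keys.forEach(k -> locks.computeIfAbsent(k, x -> new ReentrantLock()).lock());
        try {
            expectedValues.forEach((k, expected) -> {
                if (!Objects.equals(store.get(k), expected)) {
                    throw new IllegalStateException("concurrent change detected on " + k);
                }
            });
            store.putAll(preparedUpdates);
        } finally {
            keys.forEach(k -> locks.get(k).unlock());
        }
    }
}
```

In the library itself these steps are executed against the database (lock records plus an expected-value check) and wrapped by the write-ahead log, which is what allows an interrupted batch to be completed later.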
+ Used only by WriteAheadLogCompleter*/ + SAME_BATCH } } diff --git a/batch-updater/src/main/java/nosql/batch/update/lock/LockOperations.java b/batch-updater/src/main/java/nosql/batch/update/lock/LockOperations.java index 7d7f965..2de5f33 100644 --- a/batch-updater/src/main/java/nosql/batch/update/lock/LockOperations.java +++ b/batch-updater/src/main/java/nosql/batch/update/lock/LockOperations.java @@ -1,28 +1,20 @@ package nosql.batch.update.lock; -import nosql.batch.update.wal.TransactionId; - -import java.util.Collection; import java.util.List; -import java.util.Set; -import java.util.function.Consumer; -public interface LockOperations { +public interface LockOperations { /** * - * @param transactionId + * @param batchId * @param locks - * @param checkTransactionId - * - * @param onErrorCleaner In case we were not able to get all locks we should clean(unlock) them + * @param checkBatchId * @return */ - Set acquire(TransactionId transactionId, - L locks, boolean checkTransactionId, - Consumer> onErrorCleaner); + List acquire(BATCH_ID batchId, + LOCKS locks, boolean checkBatchId) throws LockingException; - List getLockedByTransaction(L locks, TransactionId transactionId); + List getLockedByBatchUpdate(LOCKS locks, BATCH_ID batchId); - void release(Collection locks); + void release(List locks, BATCH_ID batchId); } diff --git a/batch-updater/src/main/java/nosql/batch/update/lock/LockingException.java b/batch-updater/src/main/java/nosql/batch/update/lock/LockingException.java new file mode 100644 index 0000000..708d8a8 --- /dev/null +++ b/batch-updater/src/main/java/nosql/batch/update/lock/LockingException.java @@ -0,0 +1,13 @@ +package nosql.batch.update.lock; + +abstract public class LockingException extends RuntimeException{ + + public LockingException(Throwable cause){ + super(cause); + } + + public LockingException(String message){ + super(message); + } + +} diff --git a/batch-updater/src/main/java/nosql/batch/update/lock/PermanentLockingException.java b/batch-updater/src/main/java/nosql/batch/update/lock/PermanentLockingException.java new file mode 100644 index 0000000..2488a10 --- /dev/null +++ b/batch-updater/src/main/java/nosql/batch/update/lock/PermanentLockingException.java @@ -0,0 +1,17 @@ +package nosql.batch.update.lock; + +/** + * Thrown if expected value check failed + * Indicates that batch update may not be retried + */ +public class PermanentLockingException extends LockingException{ + + public PermanentLockingException(Throwable cause){ + super(cause); + } + + public PermanentLockingException(String message){ + super(message); + } + +} diff --git a/batch-updater/src/main/java/nosql/batch/update/lock/TemporaryLockingException.java b/batch-updater/src/main/java/nosql/batch/update/lock/TemporaryLockingException.java new file mode 100644 index 0000000..846727c --- /dev/null +++ b/batch-updater/src/main/java/nosql/batch/update/lock/TemporaryLockingException.java @@ -0,0 +1,14 @@ +package nosql.batch.update.lock; + +/** + * Thrown if some locks already locked by concurrent batch update + * Indicates that batch update may be retried later + */ +public class TemporaryLockingException extends LockingException{ + + public TemporaryLockingException(String message){ + super(message); + } + + +} diff --git a/batch-updater/src/main/java/nosql/batch/update/util/AsyncUtil.java b/batch-updater/src/main/java/nosql/batch/update/util/AsyncUtil.java index 1ac0c64..7ca2636 100644 --- a/batch-updater/src/main/java/nosql/batch/update/util/AsyncUtil.java +++ 
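The split into TemporaryLockingException and PermanentLockingException above gives callers a simple retry contract: temporary conflicts may be retried later, failed expected-value checks may not. A sketch of caller-side handling built on that contract; the retry count and linear backoff are illustrative choices, not part of the library.

```java
import nosql.batch.update.lock.PermanentLockingException;
import nosql.batch.update.lock.TemporaryLockingException;

public class LockingRetrySketch {

    public static void updateWithRetry(Runnable update, int maxAttempts) throws InterruptedException {
        for (int attempt = 1; ; attempt++) {
            try {
                update.run();                    // e.g. () -> batchUpdater.update(batchUpdate)
                return;
            } catch (TemporaryLockingException e) {
                if (attempt >= maxAttempts) {
                    throw e;                     // still conflicting after all attempts: give up
                }
                Thread.sleep(50L * attempt);     // back off before retrying the conflicting batch
            } catch (PermanentLockingException e) {
                throw e;                         // expected-value check failed: retrying will not help
            }
        }
    }
}
```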
b/batch-updater/src/main/java/nosql/batch/update/util/AsyncUtil.java @@ -1,16 +1,34 @@ package nosql.batch.update.util; +import java.time.Duration; +import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static java.util.concurrent.CompletableFuture.supplyAsync; public class AsyncUtil { - public static boolean shutdownAndAwaitTermination( - ExecutorService service, long timeout, TimeUnit unit) { + public static List supplyAsyncAll(List> suppliers, ExecutorService executorService){ + List> futures = suppliers.stream() + .map(supplier -> supplyAsync(supplier, executorService)) + .collect(Collectors.toList()); + return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])) + .thenApply(aVoid -> futures.stream() + .map(CompletableFuture::join) + .collect(Collectors.toList())).join(); + } + + private static final long WAIT_TIMEOUT_IN_NANOS = Duration.ofSeconds(10).toNanos(); + + public static boolean shutdownAndAwaitTermination(ExecutorService service) { // Disable new tasks from being submitted service.shutdown(); try { - long halfTimeoutNanos = TimeUnit.NANOSECONDS.convert(timeout, unit) / 2; + long halfTimeoutNanos = WAIT_TIMEOUT_IN_NANOS / 2; // Wait for half the duration of the timeout for existing tasks to terminate if (!service.awaitTermination(halfTimeoutNanos, TimeUnit.NANOSECONDS)) { // Cancel currently executing tasks diff --git a/batch-updater/src/main/java/nosql/batch/update/wal/AbstractWriteAheadLogCompleter.java b/batch-updater/src/main/java/nosql/batch/update/wal/AbstractWriteAheadLogCompleter.java new file mode 100644 index 0000000..df97781 --- /dev/null +++ b/batch-updater/src/main/java/nosql/batch/update/wal/AbstractWriteAheadLogCompleter.java @@ -0,0 +1,154 @@ +package nosql.batch.update.wal; + +import nosql.batch.update.lock.LockingException; +import nosql.batch.update.util.AsyncUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.Duration; +import java.util.List; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Completes hanged transactions + */ +abstract public class AbstractWriteAheadLogCompleter { + + private static final Logger logger = LoggerFactory.getLogger(AbstractWriteAheadLogCompleter.class); + + private final Duration staleBatchesThreshold; + private final int batchSize; + + private final ExclusiveLocker exclusiveLocker; + private final ScheduledExecutorService scheduledExecutorService; + private ScheduledFuture scheduledFuture; + + private final AtomicBoolean suspended = new AtomicBoolean(false); + + /** + * @param staleBatchesThreshold + * @param batchSize + * @param exclusiveLocker + * @param scheduledExecutorService + */ + public AbstractWriteAheadLogCompleter(Duration staleBatchesThreshold, + int batchSize, + ExclusiveLocker exclusiveLocker, + ScheduledExecutorService scheduledExecutorService){ + this.staleBatchesThreshold = staleBatchesThreshold; + this.batchSize = batchSize; + this.exclusiveLocker = exclusiveLocker; + this.scheduledExecutorService = scheduledExecutorService; + } + + public void start(){ + scheduledFuture = scheduledExecutorService.scheduleAtFixedRate( + this::completeHangedTransactions, + //set period to be slightly longer then expiration + 0, 
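AsyncUtil above now exposes supplyAsyncAll, which fans a list of suppliers out onto an executor and joins all results, plus a shutdownAndAwaitTermination variant with a fixed ten-second budget. A small usage sketch; the generic signature of supplyAsyncAll is inferred from its body, since type parameters are elided in this diff.

```java
import nosql.batch.update.util.AsyncUtil;

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Supplier;

public class AsyncUtilExample {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        try {
            List<Supplier<Integer>> tasks = Arrays.asList(() -> 1, () -> 2, () -> 3);
            // Runs every supplier on the pool and blocks until all of them finish.
            List<Integer> results = AsyncUtil.supplyAsyncAll(tasks, pool);
            System.out.println(results);             // [1, 2, 3] - results follow the input order
        } finally {
            // Shuts the pool down, waiting up to the fixed ~10 second budget before forcing it.
            AsyncUtil.shutdownAndAwaitTermination(pool);
        }
    }
}
```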
staleBatchesThreshold.toMillis() + 100, TimeUnit.MILLISECONDS); + } + + public void shutdown(){ + scheduledFuture.cancel(true); + AsyncUtil.shutdownAndAwaitTermination(scheduledExecutorService); + exclusiveLocker.release(); + exclusiveLocker.shutdown(); + } + + /** + * You should call it when the data center had been switched into the passive mode + */ + public void suspend(){ + logger.info("WAL completion is suspended"); + suspended.set(true); + exclusiveLocker.release(); + } + + public boolean isSuspended(){ + return this.suspended.get(); + } + + /** + * You should call it when the data center had been switched into the active mode + */ + public void resume(){ + logger.info("WAL completion is resumed"); + this.suspended.set(false); + } + + public CompletionStatistic completeHangedTransactions() { + + if(suspended.get()){ + logger.info("WAL completion was suspended"); + return new CompletionStatistic(0, 0, 0, 0); + } + + int staleBatchesCount = 0; + int completeBatchesCount = 0; + int ignoredBatchesCount = 0; + int errorBatchesCount = 0; + try { + if(exclusiveLocker.acquire()) { + List timeRanges = getTimeRanges(staleBatchesThreshold, batchSize); + logger.info("Got {} chunks of stale transactions. Max chunk size {}", timeRanges.size(), batchSize); + + for (WalTimeRange timeRange : timeRanges) { + List> staleBatches = getStaleBatchesForRange(timeRange); + staleBatchesCount += staleBatches.size(); + logger.info("Processing {} stale transactions", staleBatches.size()); + for(WalRecord batch : staleBatches){ + if(suspended.get()){ + logger.info("WAL completion was suspended"); + break; + } + if(Thread.currentThread().isInterrupted()){ + logger.info("WAL completion was interrupted"); + break; + } + + if(exclusiveLocker.acquire()) { + logger.info("Trying to complete batch batchId=[{}], timestamp=[{}]", + batch.batchId, batch.timestamp); + try { + processAndDeleteTransactions(batch); + completeBatchesCount++; + logger.info("Successfully complete batch batchId=[{}]", batch.batchId); + } + //this is expected behaviour that may have place in case of hanged transaction was not completed: + //not able to acquire all locks (didn't match expected value + // (may have place if initial transaction was interrupted on release stage and released values were modified)) + catch (LockingException be) { + logger.info("Failed to complete batch batchId=[{}] as it's already completed", batch.batchId, be); + releaseLocksAndDeleteWalTransactionOnError(batch); + ignoredBatchesCount ++; + logger.info("released locks for batch batchId=[{}]", batch.batchId, be); + } + //even in case of error need to move to the next one + catch (Exception e) { + errorBatchesCount ++; + logger.error("!!! 
Failed to complete batch batchId=[{}], need to be investigated", batch.batchId, e); + } + } + } + } + } + } + catch (Throwable t) { + logger.error("Error while running completeHangedTransactions()", t); + } + + return new CompletionStatistic(staleBatchesCount, completeBatchesCount, ignoredBatchesCount, errorBatchesCount); + } + + abstract protected void releaseLocksAndDeleteWalTransactionOnError(WalRecord batch); + + abstract protected void processAndDeleteTransactions(WalRecord batch); + + abstract protected List getTimeRanges(Duration staleBatchesThreshold, int batchSize); + + abstract protected List> getStaleBatchesForRange(WalTimeRange timeRange); + +} diff --git a/batch-updater/src/main/java/nosql/batch/update/wal/CompletionStatistic.java b/batch-updater/src/main/java/nosql/batch/update/wal/CompletionStatistic.java new file mode 100644 index 0000000..7f7d80e --- /dev/null +++ b/batch-updater/src/main/java/nosql/batch/update/wal/CompletionStatistic.java @@ -0,0 +1,17 @@ +package nosql.batch.update.wal; + +public class CompletionStatistic { + + public final int staleBatchesFound; + public final int staleBatchesComplete; + public final int staleBatchesIgnored; + public final int staleBatchesErrors; + + public CompletionStatistic(int staleBatchesFound, int staleBatchesComplete, int staleBatchesIgnored, int staleBatchesErrors) { + this.staleBatchesFound = staleBatchesFound; + this.staleBatchesComplete = staleBatchesComplete; + this.staleBatchesIgnored = staleBatchesIgnored; + this.staleBatchesErrors = staleBatchesErrors; + } + +} diff --git a/batch-updater/src/main/java/nosql/batch/update/wal/ExclusiveLocker.java b/batch-updater/src/main/java/nosql/batch/update/wal/ExclusiveLocker.java index be62d11..2c1c246 100644 --- a/batch-updater/src/main/java/nosql/batch/update/wal/ExclusiveLocker.java +++ b/batch-updater/src/main/java/nosql/batch/update/wal/ExclusiveLocker.java @@ -2,6 +2,7 @@ public interface ExclusiveLocker { - boolean acquireExclusiveLock(); - boolean renewExclusiveLock(); + boolean acquire(); + void release(); + void shutdown(); } diff --git a/batch-updater/src/main/java/nosql/batch/update/wal/TransactionId.java b/batch-updater/src/main/java/nosql/batch/update/wal/TransactionId.java deleted file mode 100644 index 7298dec..0000000 --- a/batch-updater/src/main/java/nosql/batch/update/wal/TransactionId.java +++ /dev/null @@ -1,4 +0,0 @@ -package nosql.batch.update.wal; - -public interface TransactionId { -} diff --git a/batch-updater/src/main/java/nosql/batch/update/wal/WalRecord.java b/batch-updater/src/main/java/nosql/batch/update/wal/WalRecord.java new file mode 100644 index 0000000..c13697d --- /dev/null +++ b/batch-updater/src/main/java/nosql/batch/update/wal/WalRecord.java @@ -0,0 +1,22 @@ +package nosql.batch.update.wal; + +import nosql.batch.update.BatchUpdate; + +public final class WalRecord implements Comparable{ + + public final BATCH_ID batchId; + public final long timestamp; + public final BatchUpdate batchUpdate; + + public WalRecord(BATCH_ID batchId, long timestamp, BatchUpdate batchUpdate) { + this.batchId = batchId; + this.timestamp = timestamp; + this.batchUpdate = batchUpdate; + } + + @Override + public int compareTo(WalRecord transaction) { + return Long.compare(timestamp, transaction.timestamp); + } + +} diff --git a/batch-updater/src/main/java/nosql/batch/update/wal/WalTimeRange.java b/batch-updater/src/main/java/nosql/batch/update/wal/WalTimeRange.java new file mode 100644 index 0000000..4116684 --- /dev/null +++ 
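The suspend and resume hooks above exist for active/passive data-center switches: a passive site must stop competing for the exclusive WAL lock. A hypothetical wiring of those hooks to a role-change callback; the listener class is illustrative and takes the two hooks as Runnables (e.g. completer::suspend, completer::resume) so it does not depend on the elided generic signature of AbstractWriteAheadLogCompleter.

```java
public class WalCompleterRoleSwitch {

    private final Runnable suspendCompleter;
    private final Runnable resumeCompleter;

    public WalCompleterRoleSwitch(Runnable suspendCompleter, Runnable resumeCompleter) {
        this.suspendCompleter = suspendCompleter;
        this.resumeCompleter = resumeCompleter;
    }

    // A passive data center must not compete for the exclusive WAL lock, so completion
    // is suspended there and resumed once the data center becomes active again.
    public void onRoleChange(boolean active) {
        if (active) {
            resumeCompleter.run();
        } else {
            suspendCompleter.run();
        }
    }
}
```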
b/batch-updater/src/main/java/nosql/batch/update/wal/WalTimeRange.java @@ -0,0 +1,19 @@ +package nosql.batch.update.wal; + +public final class WalTimeRange { + public final long fromTimestamp; + public final long toTimestamp; + + public WalTimeRange(long fromTimestamp, long toTimestamp) { + this.fromTimestamp = fromTimestamp; + this.toTimestamp = toTimestamp; + } + + public long getFromTimestamp() { + return fromTimestamp; + } + + public long getToTimestamp() { + return toTimestamp; + } +} diff --git a/batch-updater/src/main/java/nosql/batch/update/wal/WalTransaction.java b/batch-updater/src/main/java/nosql/batch/update/wal/WalTransaction.java deleted file mode 100644 index 754f150..0000000 --- a/batch-updater/src/main/java/nosql/batch/update/wal/WalTransaction.java +++ /dev/null @@ -1,25 +0,0 @@ -package nosql.batch.update.wal; - -import nosql.batch.update.BatchUpdate; - -abstract class WalTransaction implements Comparable{ - - final TransactionId transactionId; - final long timestamp; - final BatchUpdate batch; - - public WalTransaction(TransactionId transactionId, long timestamp, BatchUpdate batch) { - this.transactionId = transactionId; - this.timestamp = timestamp; - this.batch = batch; - } - - @Override - public int compareTo(WalTransaction transaction) { - return Long.compare(timestamp, transaction.timestamp); - } - - public BatchUpdate getBatchUpdate() { - return batch; - } -} diff --git a/batch-updater/src/main/java/nosql/batch/update/wal/WriteAheadLogCompleter.java b/batch-updater/src/main/java/nosql/batch/update/wal/WriteAheadLogCompleter.java index 33d3c4c..a4f8421 100644 --- a/batch-updater/src/main/java/nosql/batch/update/wal/WriteAheadLogCompleter.java +++ b/batch-updater/src/main/java/nosql/batch/update/wal/WriteAheadLogCompleter.java @@ -1,130 +1,56 @@ package nosql.batch.update.wal; import nosql.batch.update.BatchOperations; -import nosql.batch.update.lock.FailedToAcquireAllLocksException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import nosql.batch.update.lock.Lock; import java.time.Duration; import java.util.List; import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -import static nosql.batch.update.util.AsyncUtil.shutdownAndAwaitTermination; /** * Completes hanged transactions */ -public class WriteAheadLogCompleter { - - private static Logger logger = LoggerFactory.getLogger(WriteAheadLogCompleter.class); - - private static final long WAIT_TIMEOUT_IN_SECONDS = 10; - - private final WriteAheadLogManager writeAheadLogManager; - private final Duration delayBeforeCompletion; - private final BatchOperations batchOperations; - private final ExclusiveLocker exclusiveLocker; - private final ScheduledExecutorService scheduledExecutorService; +public class WriteAheadLogCompleter + extends AbstractWriteAheadLogCompleter{ - private AtomicBoolean suspended = new AtomicBoolean(false); + private final WriteAheadLogManager writeAheadLogManager; + private final BatchOperations batchOperations; /** * @param batchOperations - * @param delayBeforeCompletion set period to be slightly longer then expiration + * @param staleBatchesThreshold * @param exclusiveLocker * @param scheduledExecutorService */ - public WriteAheadLogCompleter(BatchOperations batchOperations, Duration delayBeforeCompletion, + public WriteAheadLogCompleter(BatchOperations batchOperations, + Duration staleBatchesThreshold, + int batchSize, ExclusiveLocker exclusiveLocker, ScheduledExecutorService 
scheduledExecutorService){ + super(staleBatchesThreshold, batchSize, exclusiveLocker, scheduledExecutorService); this.writeAheadLogManager = batchOperations.getWriteAheadLogManager(); this.batchOperations = batchOperations; - - this.delayBeforeCompletion = delayBeforeCompletion; - this.exclusiveLocker = exclusiveLocker; - this.scheduledExecutorService = scheduledExecutorService; - } - - public void start(){ - scheduledExecutorService.scheduleAtFixedRate( - this::completeHangedTransactions, - 0, delayBeforeCompletion.toMillis(), TimeUnit.MILLISECONDS); - } - - public void shutdown(){ - shutdownAndAwaitTermination(scheduledExecutorService, WAIT_TIMEOUT_IN_SECONDS, TimeUnit.SECONDS); } - /** - * You should call it when the data center had been switched into the passive mode - */ - public void suspend(){ - this.suspended.set(true); + @Override + protected void releaseLocksAndDeleteWalTransactionOnError(WalRecord batch) { + batchOperations.releaseLocksAndDeleteWalTransactionOnError( + batch.batchUpdate.locks(), batch.batchId); } - public boolean isSuspended(){ - return this.suspended.get(); + @Override + protected void processAndDeleteTransactions(WalRecord batch) { + batchOperations.processAndDeleteTransaction( + batch.batchId, batch.batchUpdate, true); } - /** - * You should call it when the data center had been switched into the active mode - */ - public void resume(){ - this.suspended.set(false); + @Override + protected List getTimeRanges(Duration staleBatchesThreshold, int batchSize) { + return writeAheadLogManager.getTimeRanges(staleBatchesThreshold, batchSize); } - private void completeHangedTransactions() { - - if(suspended.get()){ - logger.info("WAL execution was suspended"); - return; - } - - try { - if(exclusiveLocker.acquireExclusiveLock()){ - List> staleTransactions = writeAheadLogManager.getStaleTransactions(); - logger.info("Got {} stale transactions", staleTransactions.size()); - for(WalTransaction transaction : staleTransactions){ - if(suspended.get()){ - logger.info("WAL execution was suspended"); - break; - } - if(Thread.currentThread().isInterrupted()){ - logger.info("WAL execution was interrupted"); - break; - } - - if(exclusiveLocker.renewExclusiveLock()) { - logger.info("Trying to complete transaction txId=[{}], timestamp=[{}]", - transaction.transactionId, transaction.timestamp); - try { - batchOperations.processAndDeleteTransaction( - transaction.transactionId, transaction.locks, transaction.updates, true); - logger.info("Successfully complete transaction txId=[{}]", transaction.transactionId); - } - //this is expected behaviour that may have place in case of hanged transaction was not completed: - //not able to acquire all locks (didn't match expected value - // (may have place if initial transaction was interrupted on release stage and released values were modified)) - catch (FailedToAcquireAllLocksException be) { - logger.info("Failed to complete transaction txId=[{}] as it's already completed", transaction.transactionId, be); - batchOperations.releaseLocksAndDeleteWalTransactionOnError( - transaction.locks, transaction.transactionId); - logger.info("released locks for transaction txId=[{}]", transaction.transactionId, be); - } - //even in case of error need to move to the next one - catch (Exception e) { - logger.error("!!! 
Failed to complete transaction txId=[{}], need to be investigated", - transaction.transactionId, e); - } - } - } - } - } - catch (Throwable t) { - logger.error("Error while running completeHangedTransactions()", t); - throw t; - } + @Override + protected List> getStaleBatchesForRange(WalTimeRange timeRange) { + return writeAheadLogManager.getStaleBatchesForRange(timeRange); } } diff --git a/batch-updater/src/main/java/nosql/batch/update/wal/WriteAheadLogManager.java b/batch-updater/src/main/java/nosql/batch/update/wal/WriteAheadLogManager.java index 076eafd..82342f5 100644 --- a/batch-updater/src/main/java/nosql/batch/update/wal/WriteAheadLogManager.java +++ b/batch-updater/src/main/java/nosql/batch/update/wal/WriteAheadLogManager.java @@ -3,14 +3,17 @@ import nosql.batch.update.BatchUpdate; +import java.time.Duration; import java.util.List; -public interface WriteAheadLogManager { +public interface WriteAheadLogManager { - TransactionId writeTransaction(BatchUpdate batch); + BATCH_ID writeBatch(BatchUpdate batch); - void deleteTransaction(TransactionId transactionId); + boolean deleteBatch(BATCH_ID batchId); - List> getStaleTransactions(); + List getTimeRanges(Duration staleThreshold, int batchSize); + + List> getStaleBatchesForRange(WalTimeRange timeRange); } diff --git a/batch-updater/src/test/java/nosql/batch/update/BatchRetentionTest.java b/batch-updater/src/test/java/nosql/batch/update/BatchRetentionTest.java new file mode 100644 index 0000000..364bc0f --- /dev/null +++ b/batch-updater/src/test/java/nosql/batch/update/BatchRetentionTest.java @@ -0,0 +1,116 @@ +package nosql.batch.update; + +import nosql.batch.update.lock.PermanentLockingException; +import nosql.batch.update.lock.TemporaryLockingException; +import nosql.batch.update.wal.CompletionStatistic; +import org.awaitility.Awaitility; +import org.awaitility.Duration; +import org.junit.Test; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static nosql.batch.update.RecoveryTest.completionStatisticAssertion; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +/** + * Checks whether transaction + * - removed on failed lock + * - retains on failed mutation + */ +abstract public class BatchRetentionTest { + + protected abstract void cleanUp() throws InterruptedException; + protected abstract void runUpdate(); + + protected abstract CompletionStatistic runCompleter(); + protected abstract void checkForConsistency(); + + protected static final AtomicReference failsAcquireLock = new AtomicReference<>(); + protected static final AtomicReference failsCheckValue = new AtomicReference<>(); + protected static final AtomicBoolean failsMutate = new AtomicBoolean(); + protected static final AtomicBoolean failsReleaseLock = new AtomicBoolean(); + protected static final AtomicBoolean failsDeleteBatch = new AtomicBoolean(); + protected static final AtomicInteger deletesInProcess = new AtomicInteger(); + + @Test + public void shouldKeepConsistencyIfAcquireFailed() throws InterruptedException { + shouldBecameConsistentAfterFailAndCompletion(() -> failsAcquireLock.set(new RuntimeException()), RuntimeException.class, + completionStatisticAssertion(1, 1, 0)); + } + + @Test + public void shouldKeepConsistencyIfAcquireFailedWithLockingException() throws InterruptedException { + shouldBecameConsistentAfterFailAndCompletion(() -> failsAcquireLock.set(new 
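The reworked WriteAheadLogManager interface above replaces the single getStaleTransactions() call with a two-step, chunked scan: getTimeRanges(staleThreshold, batchSize) first, then getStaleBatchesForRange for each range. A sketch of a consumer paging through stale WAL entries with that API; raw types are used deliberately because the generic parameters are elided in this diff.

```java
import nosql.batch.update.wal.WalRecord;
import nosql.batch.update.wal.WalTimeRange;
import nosql.batch.update.wal.WriteAheadLogManager;

import java.time.Duration;
import java.util.List;

@SuppressWarnings({"rawtypes", "unchecked"})
public class StaleBatchScanSketch {

    // Walks every stale batch, at most batchSize records per time range.
    static void scan(WriteAheadLogManager wal, Duration staleThreshold, int batchSize) {
        List<WalTimeRange> ranges = wal.getTimeRanges(staleThreshold, batchSize);
        for (WalTimeRange range : ranges) {
            List<WalRecord> records = wal.getStaleBatchesForRange(range);
            for (WalRecord record : records) {
                System.out.println("stale batch " + record.batchId + " written at " + record.timestamp);
            }
        }
    }
}
```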
TemporaryLockingException("test")), RuntimeException.class, + completionStatisticAssertion(0, 0, 0)); + } + + @Test + public void shouldKeepConsistencyIfCheckValueFailed() throws InterruptedException { + shouldBecameConsistentAfterFailAndCompletion(() -> failsCheckValue.set(new RuntimeException()), RuntimeException.class, + completionStatisticAssertion(1, 1, 0)); + } + + @Test + public void shouldKeepConsistencyIfCheckValueFailedWithLockingExcption() throws InterruptedException { + shouldBecameConsistentAfterFailAndCompletion(() -> failsCheckValue.set(new PermanentLockingException("test")), RuntimeException.class, + completionStatisticAssertion(0, 0, 0)); + } + + @Test + public void shouldKeepConsistencyIfMutationFailed() throws InterruptedException { + shouldBecameConsistentAfterFailAndCompletion(() -> failsMutate.set(true), RuntimeException.class, + completionStatisticAssertion(1, 1, 0)); + } + + @Test + public void shouldKeepConsistencyIfReleaseFailed() throws InterruptedException { + shouldBecameConsistentAfterFailAndCompletion(() -> failsReleaseLock.set(true), RuntimeException.class, + completionStatisticAssertion(1)); + } + + @Test + public void shouldRetainBatchIfDeleteBatchFailed() throws InterruptedException { + shouldBecameConsistentAfterFailAndCompletion(() -> failsDeleteBatch.set(true), null, + completionStatisticAssertion(10, 0, 10)); + } + + private void shouldBecameConsistentAfterFailAndCompletion( + Runnable breaker, Class expectedException, + Consumer completionStatisticAssertion) throws InterruptedException { + for (int i = 0; i < 20; i++) { + cleanUp(); + + fixAll(); + + breaker.run(); + + if(expectedException != null){ + assertThatThrownBy(this::runUpdate) + .isInstanceOf(expectedException); + } else { + runUpdate(); + } + + Awaitility.waitAtMost(Duration.ONE_SECOND).until(() -> deletesInProcess.get() == 0); + + fixAll(); + + CompletionStatistic completionStatistic = runCompleter(); + completionStatisticAssertion.accept(completionStatistic); + + checkForConsistency(); + } + } + + private void fixAll(){ + failsAcquireLock.set(null); + failsCheckValue.set(null); + failsMutate.set(false); + failsReleaseLock.set(false); + failsDeleteBatch.set(false); + } +} + diff --git a/batch-updater/src/test/java/nosql/batch/update/FailingUpdateOperations.java b/batch-updater/src/test/java/nosql/batch/update/FailingUpdateOperations.java new file mode 100644 index 0000000..4155b78 --- /dev/null +++ b/batch-updater/src/test/java/nosql/batch/update/FailingUpdateOperations.java @@ -0,0 +1,29 @@ +package nosql.batch.update; + +import java.util.concurrent.atomic.AtomicBoolean; + +abstract public class FailingUpdateOperations implements UpdateOperations { + + private final UpdateOperations updateOperations; + private final AtomicBoolean failsUpdate; + + public FailingUpdateOperations(UpdateOperations updateOperations, AtomicBoolean failsUpdate) { + this.updateOperations = updateOperations; + this.failsUpdate = failsUpdate; + } + + abstract protected UPDATES selectFlakingToUpdate(UPDATES batchOfUpdates); + + @Override + public void updateMany(UPDATES batchOfUpdates, boolean calledByWal) { + if(failsUpdate.get()){ + UPDATES partialUpdate = selectFlakingToUpdate(batchOfUpdates); + updateOperations.updateMany(partialUpdate, calledByWal); + throw new RuntimeException(); + } + else { + updateOperations.updateMany(batchOfUpdates, calledByWal); + } + } + +} diff --git a/batch-updater/src/test/java/nosql/batch/update/HangingUpdateOperations.java 
b/batch-updater/src/test/java/nosql/batch/update/HangingUpdateOperations.java new file mode 100644 index 0000000..4076121 --- /dev/null +++ b/batch-updater/src/test/java/nosql/batch/update/HangingUpdateOperations.java @@ -0,0 +1,31 @@ +package nosql.batch.update; + +import java.util.concurrent.atomic.AtomicBoolean; + +import static nosql.batch.update.util.HangingUtil.hang; + +abstract public class HangingUpdateOperations implements UpdateOperations { + + private final UpdateOperations updateOperations; + private final AtomicBoolean hangUpdate; + + public HangingUpdateOperations(UpdateOperations updateOperations, AtomicBoolean hangUpdate) { + this.updateOperations = updateOperations; + this.hangUpdate = hangUpdate; + } + + abstract protected UPDATES selectFlakingToUpdate(UPDATES batchOfUpdates); + + @Override + public void updateMany(UPDATES batchOfUpdates, boolean calledByWal) { + if(hangUpdate.get()){ + UPDATES partialUpdate = selectFlakingToUpdate(batchOfUpdates); + updateOperations.updateMany(partialUpdate, calledByWal); + hang(); + } + else { + updateOperations.updateMany(batchOfUpdates, calledByWal); + } + } + +} diff --git a/batch-updater/src/test/java/nosql/batch/update/RecoveryTest.java b/batch-updater/src/test/java/nosql/batch/update/RecoveryTest.java new file mode 100644 index 0000000..8a351f7 --- /dev/null +++ b/batch-updater/src/test/java/nosql/batch/update/RecoveryTest.java @@ -0,0 +1,123 @@ +package nosql.batch.update; + +import nosql.batch.update.wal.CompletionStatistic; +import org.junit.Test; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; +import java.util.function.Predicate; + +import static nosql.batch.update.util.HangingUtil.hanged; +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; +import static org.awaitility.Duration.FIVE_MINUTES; + +/** + * Check that hanged batch get recovered by WriteAheadLogCompleter + */ +abstract public class RecoveryTest { + + protected abstract void cleanUp() throws InterruptedException; + + protected abstract void runUpdate(); + + protected abstract CompletionStatistic runCompleter(); + + protected abstract void checkForConsistency(); + + protected static final AtomicBoolean hangsAcquire = new AtomicBoolean(); + protected static final AtomicBoolean hangsUpdate = new AtomicBoolean(); + protected static final AtomicBoolean hangsRelease = new AtomicBoolean(); + protected static final AtomicBoolean hangsDeleteBatchInWal = new AtomicBoolean(); + + @Test + public void shouldBecameConsistentAfterAcquireLockHanged() throws InterruptedException { + shouldBecameConsistentAfterHangAndCompletion( + () -> hangsAcquire.set(true), + completionStatisticAssertion(1, 1, 0)); + } + + @Test + public void shouldBecameConsistentAfterMutateHanged() throws InterruptedException { + shouldBecameConsistentAfterHangAndCompletion( + () -> hangsUpdate.set(true), + completionStatisticAssertion(1, 1, 0)); + } + + @Test + public void shouldBecameConsistentAfterReleaseLockHanged() throws InterruptedException { + shouldBecameConsistentAfterHangAndCompletion( + () -> hangsRelease.set(true), + completionStatisticAssertion(1)); + } + + @Test + public void shouldBecameConsistentAfterDeleteTransactionHanged() throws InterruptedException { + shouldBecameConsistentAfterHangAndCompletion( + () -> hangsDeleteBatchInWal.set(true), + completionStatisticAssertion( + staleBatchesFound -> staleBatchesFound >= 1, + staleBatchesComplete -> staleBatchesComplete == 0, + staleBatchesIgnored -> 
staleBatchesIgnored >= 1)); + } + + protected void shouldBecameConsistentAfterHangAndCompletion( + Runnable breaker, Consumer completionStatisticAssertion) throws InterruptedException { + + for(int i = 0; i < 10; i++) { + cleanUp(); + + fixAll(); + breaker.run(); + + new Thread(this::runUpdate).start(); + + await().dontCatchUncaughtExceptions() + .timeout(FIVE_MINUTES) + .until(hanged::get); + + fixAll(); + + CompletionStatistic completionStat = runCompleter(); + completionStatisticAssertion.accept(completionStat); + + //check state. It should be fixed at this time + checkForConsistency(); + + //check normal update is possible + runUpdate(); + checkForConsistency(); + } + } + + protected void fixAll() { + hangsAcquire.set(false); + hangsUpdate.set(false); + hangsRelease.set(false); + hangsDeleteBatchInWal.set(false); + hanged.set(false); + } + + static Consumer completionStatisticAssertion( + int staleBatchesFound, int staleBatchesComplete, int staleBatchesIgnored){ + return completionStatistic -> { + assertThat(completionStatistic.staleBatchesFound).isEqualTo(staleBatchesFound); + assertThat(completionStatistic.staleBatchesComplete).isEqualTo(staleBatchesComplete); + assertThat(completionStatistic.staleBatchesIgnored).isEqualTo(staleBatchesIgnored); + }; + } + + static Consumer completionStatisticAssertion( + Predicate staleBatchesFound, Predicate staleBatchesComplete, Predicate staleBatchesIgnored){ + return completionStatistic -> { + assertThat(completionStatistic.staleBatchesFound).matches(staleBatchesFound); + assertThat(completionStatistic.staleBatchesComplete).matches(staleBatchesComplete); + assertThat(completionStatistic.staleBatchesIgnored).matches(staleBatchesIgnored); + }; + } + + static Consumer completionStatisticAssertion(int staleBatchesFound){ + return completionStatistic -> + assertThat(completionStatistic.staleBatchesFound).isEqualTo(staleBatchesFound); + } +} diff --git a/batch-updater/src/test/java/nosql/batch/update/lock/HangingLockOperations.java b/batch-updater/src/test/java/nosql/batch/update/lock/HangingLockOperations.java new file mode 100644 index 0000000..7240a72 --- /dev/null +++ b/batch-updater/src/test/java/nosql/batch/update/lock/HangingLockOperations.java @@ -0,0 +1,57 @@ +package nosql.batch.update.lock; + +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static nosql.batch.update.util.HangingUtil.hang; + +abstract public class HangingLockOperations implements LockOperations { + + private final LockOperations lockOperations; + private final AtomicBoolean failsAcquire; + private final AtomicBoolean failsRelease; + + public HangingLockOperations(LockOperations lockOperations, + AtomicBoolean failsAcquire, AtomicBoolean failsRelease) { + this.lockOperations = lockOperations; + this.failsAcquire = failsAcquire; + this.failsRelease = failsRelease; + } + + abstract protected LOCKS selectFlakingToAcquire(LOCKS locks); + abstract protected List selectFlakingToRelease(List locks); + + @Override + public List acquire(BATCH_ID batchId, LOCKS locks, boolean checkTransactionId) throws LockingException { + if(failsAcquire.get()) { + LOCKS partialLocks = selectFlakingToAcquire(locks); + try { + return lockOperations.acquire(batchId, partialLocks, checkTransactionId); + } finally { + hang(); + } + } else { + return lockOperations.acquire(batchId, locks, checkTransactionId); + } + } + + @Override + public List getLockedByBatchUpdate(LOCKS locks, BATCH_ID batchId) { + return lockOperations.getLockedByBatchUpdate(locks, batchId); + } + + 
@Override + public void release(List locks, BATCH_ID batchId) { + if(failsRelease.get()){ + List partialLocks = selectFlakingToRelease(locks); + try { + lockOperations.release(partialLocks, batchId); + } finally { + hang(); + } + } else { + lockOperations.release(locks, batchId); + } + } + +} \ No newline at end of file diff --git a/batch-updater/src/test/java/nosql/batch/update/lock/LockOperationsTest.java b/batch-updater/src/test/java/nosql/batch/update/lock/LockOperationsTest.java new file mode 100644 index 0000000..9e34527 --- /dev/null +++ b/batch-updater/src/test/java/nosql/batch/update/lock/LockOperationsTest.java @@ -0,0 +1,67 @@ +package nosql.batch.update.lock; + +import org.junit.Test; + +import java.util.List; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +abstract public class LockOperationsTest { + + private final LockOperations lockOperations; + + public LockOperationsTest(LockOperations lockOperations) { + this.lockOperations = lockOperations; + } + + abstract protected LOCKS getLocks1(); + abstract protected BATCH_ID generateBatchId(); + abstract protected void assertThatSameLockKeys(List locks1, List locks2); + + @Test + public void shouldNotLockLocked(){ + BATCH_ID batchId1 = generateBatchId(); + List acquiredLocks = lockOperations.acquire(batchId1, getLocks1(), false); + assertThat(acquiredLocks).isNotEmpty(); + assertThat(acquiredLocks.stream().map(l -> l.lockType).collect(Collectors.toSet())) + .containsExactly(Lock.LockType.LOCKED); + + assertThatThrownBy(() -> lockOperations.acquire(generateBatchId(), getLocks1(), false)) + .isInstanceOf(TemporaryLockingException.class); + + lockOperations.release(acquiredLocks, batchId1); + + List acquiredLocks1 = lockOperations.acquire(batchId1, getLocks1(), false); + assertThat(acquiredLocks1).containsExactlyInAnyOrderElementsOf(acquiredLocks); + } + + @Test + public void shouldLockLockedForSameBatch(){ + BATCH_ID batchId1 = generateBatchId(); + List acquiredLocks = lockOperations.acquire(batchId1, getLocks1(), false); + assertThat(acquiredLocks).isNotEmpty(); + assertThat(acquiredLocks.stream().map(l -> l.lockType).collect(Collectors.toSet())) + .containsExactly(Lock.LockType.LOCKED); + + List acquiredLocks1 = lockOperations.acquire(batchId1, getLocks1(), true); + + assertThatSameLockKeys(acquiredLocks1, acquiredLocks); + assertThat(acquiredLocks1.stream().map(l -> l.lockType).collect(Collectors.toSet())) + .containsExactly(Lock.LockType.SAME_BATCH); + } + + @Test + public void shouldReturnLocked(){ + BATCH_ID batchId1 = generateBatchId(); + List acquiredLocks = lockOperations.acquire(batchId1, getLocks1(), false); + + List lockedLocks = lockOperations.getLockedByBatchUpdate(getLocks1(), batchId1); + + assertThatSameLockKeys(lockedLocks, acquiredLocks); + assertThat(lockedLocks.stream().map(l -> l.lockType).collect(Collectors.toSet())) + .containsExactly(Lock.LockType.SAME_BATCH); + } + +} diff --git a/batch-updater/src/test/java/nosql/batch/update/util/FixedClock.java b/batch-updater/src/test/java/nosql/batch/update/util/FixedClock.java new file mode 100644 index 0000000..6c7e783 --- /dev/null +++ b/batch-updater/src/test/java/nosql/batch/update/util/FixedClock.java @@ -0,0 +1,31 @@ +package nosql.batch.update.util; + +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneId; +import java.util.concurrent.atomic.AtomicLong; + +public class FixedClock extends Clock { + + private final 
AtomicLong time = new AtomicLong(); + + @Override + public ZoneId getZone() { + return null; + } + + @Override + public Clock withZone(ZoneId zone) { + return null; + } + + @Override + public Instant instant() { + return Instant.ofEpochMilli(time.get()); + } + + public void setTime(long time) { + this.time.set(time); + } +} + diff --git a/batch-updater/src/test/java/nosql/batch/update/util/HangingUtil.java b/batch-updater/src/test/java/nosql/batch/update/util/HangingUtil.java new file mode 100644 index 0000000..03b094b --- /dev/null +++ b/batch-updater/src/test/java/nosql/batch/update/util/HangingUtil.java @@ -0,0 +1,37 @@ +package nosql.batch.update.util; + +import java.util.Collection; +import java.util.List; +import java.util.Random; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; +import java.util.stream.Collectors; + +public class HangingUtil { + + private static final Random random = new Random(); + public static final AtomicBoolean hanged = new AtomicBoolean(); + + public static List selectFlaking(Collection keys, Consumer failedConsumer) { + return keys.stream() + .filter(key -> { + boolean fail = random.nextBoolean(); + if (fail) { + failedConsumer.accept(key); + } + return !fail; + }) + .collect(Collectors.toList()); + } + + public static void hang() { + try { + hanged.set(true); + new CompletableFuture<>().get(); + } catch (InterruptedException | ExecutionException e) { + e.printStackTrace(); + } + } +} diff --git a/batch-updater/src/test/java/nosql/batch/update/wal/ExclusiveLockerTest.java b/batch-updater/src/test/java/nosql/batch/update/wal/ExclusiveLockerTest.java new file mode 100644 index 0000000..efb5e6c --- /dev/null +++ b/batch-updater/src/test/java/nosql/batch/update/wal/ExclusiveLockerTest.java @@ -0,0 +1,50 @@ +package nosql.batch.update.wal; + +import org.junit.After; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +abstract public class ExclusiveLockerTest { + + abstract protected ExclusiveLocker getExclusiveLocker(); + + private ExclusiveLocker exclusiveLocker = getExclusiveLocker(); + + @After + public void after(){ + exclusiveLocker.shutdown(); + } + + @Test + public void shouldBeReentrant(){ + assertThat(exclusiveLocker.acquire()).isTrue(); + assertThat(exclusiveLocker.acquire()).isTrue(); + + exclusiveLocker.release(); + } + + @Test + public void shouldBeExclusive() { + ExclusiveLocker exclusiveLocker2 = getExclusiveLocker(); + + assertThat(exclusiveLocker.acquire()).isTrue(); + assertThat(exclusiveLocker2.acquire()).isFalse(); + + exclusiveLocker.release(); + exclusiveLocker2.shutdown(); + } + + @Test + public void shouldLockAfterUnlock(){ + ExclusiveLocker exclusiveLocker2 = getExclusiveLocker(); + + assertThat(exclusiveLocker.acquire()).isTrue(); + exclusiveLocker.release(); + assertThat(exclusiveLocker2.acquire()).isTrue(); + + exclusiveLocker2.release(); + exclusiveLocker2.shutdown(); + } + +} diff --git a/batch-updater/src/test/java/nosql/batch/update/wal/FailingWriteAheadLogManager.java b/batch-updater/src/test/java/nosql/batch/update/wal/FailingWriteAheadLogManager.java new file mode 100644 index 0000000..ec9e484 --- /dev/null +++ b/batch-updater/src/test/java/nosql/batch/update/wal/FailingWriteAheadLogManager.java @@ -0,0 +1,57 @@ +package nosql.batch.update.wal; + +import nosql.batch.update.BatchUpdate; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import 
java.time.Duration; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +public class FailingWriteAheadLogManager implements WriteAheadLogManager { + + private static final Logger logger = LoggerFactory.getLogger(FailingWriteAheadLogManager.class); + + private final WriteAheadLogManager writeAheadLogManager; + private final AtomicBoolean failsDelete; + + private final AtomicInteger deletesInProcess; + + public FailingWriteAheadLogManager(WriteAheadLogManager writeAheadLogManager, + AtomicBoolean failsDelete, AtomicInteger deletesInProcess) { + this.writeAheadLogManager = writeAheadLogManager; + this.failsDelete = failsDelete; + this.deletesInProcess = deletesInProcess; + } + + @Override + public BATCH_ID writeBatch(BatchUpdate batch) { + return writeAheadLogManager.writeBatch(batch); + } + + @Override + public boolean deleteBatch(BATCH_ID batchId) { + if(failsDelete.get()){ + logger.error("deleteBatch failed flaking for batchId [{}]", batchId); + throw new RuntimeException(); + } else { + deletesInProcess.incrementAndGet(); + try { + return writeAheadLogManager.deleteBatch(batchId); + } finally { + deletesInProcess.decrementAndGet(); + } + } + } + + @Override + public List getTimeRanges(Duration staleThreshold, int batchSize) { + return writeAheadLogManager.getTimeRanges(staleThreshold, batchSize); + } + + @Override + public List> getStaleBatchesForRange(WalTimeRange timeRange) { + return writeAheadLogManager.getStaleBatchesForRange(timeRange); + } +} diff --git a/batch-updater/src/test/java/nosql/batch/update/wal/HangingWriteAheadLogManager.java b/batch-updater/src/test/java/nosql/batch/update/wal/HangingWriteAheadLogManager.java new file mode 100644 index 0000000..4fd86df --- /dev/null +++ b/batch-updater/src/test/java/nosql/batch/update/wal/HangingWriteAheadLogManager.java @@ -0,0 +1,50 @@ +package nosql.batch.update.wal; + +import nosql.batch.update.BatchUpdate; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.Duration; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static nosql.batch.update.util.HangingUtil.hang; + +public class HangingWriteAheadLogManager implements WriteAheadLogManager { + + private static final Logger logger = LoggerFactory.getLogger(HangingWriteAheadLogManager.class); + + private final WriteAheadLogManager writeAheadLogManager; + private final AtomicBoolean failsDelete; + + public HangingWriteAheadLogManager(WriteAheadLogManager writeAheadLogManager, AtomicBoolean failsDelete) { + this.writeAheadLogManager = writeAheadLogManager; + this.failsDelete = failsDelete; + } + + @Override + public BATCH_ID writeBatch(BatchUpdate batch) { + return writeAheadLogManager.writeBatch(batch); + } + + @Override + public boolean deleteBatch(BATCH_ID batchId) { + if(failsDelete.get()){ + logger.error("deleteBatch failed hanging for batchId [{}]", batchId); + hang(); + throw new IllegalArgumentException(); + } else { + return writeAheadLogManager.deleteBatch(batchId); + } + } + + @Override + public List getTimeRanges(Duration staleThreshold, int batchSize) { + return writeAheadLogManager.getTimeRanges(staleThreshold, batchSize); + } + + @Override + public List> getStaleBatchesForRange(WalTimeRange timeRange) { + return writeAheadLogManager.getStaleBatchesForRange(timeRange); + } +} diff --git a/batch-updater/src/test/java/nosql/batch/update/wal/WriteAheadLogManagerTest.java 
b/batch-updater/src/test/java/nosql/batch/update/wal/WriteAheadLogManagerTest.java new file mode 100644 index 0000000..76d32fd --- /dev/null +++ b/batch-updater/src/test/java/nosql/batch/update/wal/WriteAheadLogManagerTest.java @@ -0,0 +1,39 @@ +package nosql.batch.update.wal; + +import org.junit.Test; + +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +abstract public class WriteAheadLogManagerTest { + + abstract protected BATCH_ID saveBatch(); + + abstract protected boolean removeBatch(BATCH_ID batchId); + + abstract protected void switchClockAhead(); + + abstract protected List getStaleBatches(); + + @Test + public void shouldRetrieveStaleBatches(){ + BATCH_ID batchId = saveBatch(); + + List staleBatches = getStaleBatches(); + assertThat(staleBatches).isEmpty(); + + switchClockAhead(); + staleBatches = getStaleBatches(); + assertThat(staleBatches).containsExactly(batchId); + } + + @Test + public void shouldDeleteBatch(){ + BATCH_ID batchId = saveBatch(); + assertThat(removeBatch(batchId)).isTrue(); + assertThat(removeBatch(batchId)).isFalse(); + } + + +} diff --git a/mvnw b/mvnw new file mode 100755 index 0000000..8d937f4 --- /dev/null +++ b/mvnw @@ -0,0 +1,308 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Apache Maven Wrapper startup batch script, version 3.2.0 +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /usr/local/etc/mavenrc ] ; then + . /usr/local/etc/mavenrc + fi + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. 
+cygwin=false; +darwin=false; +mingw=false +case "$(uname)" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + JAVA_HOME="$(/usr/libexec/java_home)"; export JAVA_HOME + else + JAVA_HOME="/Library/Java/Home"; export JAVA_HOME + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=$(java-config --jre-home) + fi +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$JAVA_HOME" ] && + JAVA_HOME=$(cygpath --unix "$JAVA_HOME") + [ -n "$CLASSPATH" ] && + CLASSPATH=$(cygpath --path --unix "$CLASSPATH") +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$JAVA_HOME" ] && [ -d "$JAVA_HOME" ] && + JAVA_HOME="$(cd "$JAVA_HOME" || (echo "cannot cd into $JAVA_HOME."; exit 1); pwd)" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="$(which javac)" + if [ -n "$javaExecutable" ] && ! [ "$(expr "\"$javaExecutable\"" : '\([^ ]*\)')" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=$(which readlink) + if [ ! "$(expr "$readLink" : '\([^ ]*\)')" = "no" ]; then + if $darwin ; then + javaHome="$(dirname "\"$javaExecutable\"")" + javaExecutable="$(cd "\"$javaHome\"" && pwd -P)/javac" + else + javaExecutable="$(readlink -f "\"$javaExecutable\"")" + fi + javaHome="$(dirname "\"$javaExecutable\"")" + javaHome=$(expr "$javaHome" : '\(.*\)/bin') + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="$(\unset -f command 2>/dev/null; \command -v java)" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=$(cd "$wdir/.." || exit 1; pwd) + fi + # end of workaround + done + printf '%s' "$(cd "$basedir" || exit 1; pwd)" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + # Remove \r in case we run on Windows within Git Bash + # and check out the repository with auto CRLF management + # enabled. Otherwise, we may read lines that are delimited with + # \r\n and produce $'-Xarg\r' rather than -Xarg due to word + # splitting rules. 
+ tr -s '\r\n' ' ' < "$1" + fi +} + +log() { + if [ "$MVNW_VERBOSE" = true ]; then + printf '%s\n' "$1" + fi +} + +BASE_DIR=$(find_maven_basedir "$(dirname "$0")") +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}; export MAVEN_PROJECTBASEDIR +log "$MAVEN_PROJECTBASEDIR" + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +wrapperJarPath="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" +if [ -r "$wrapperJarPath" ]; then + log "Found $wrapperJarPath" +else + log "Couldn't find $wrapperJarPath, downloading it ..." + + if [ -n "$MVNW_REPOURL" ]; then + wrapperUrl="$MVNW_REPOURL/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" + else + wrapperUrl="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" + fi + while IFS="=" read -r key value; do + # Remove '\r' from value to allow usage on windows as IFS does not consider '\r' as a separator ( considers space, tab, new line ('\n'), and custom '=' ) + safeValue=$(echo "$value" | tr -d '\r') + case "$key" in (wrapperUrl) wrapperUrl="$safeValue"; break ;; + esac + done < "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.properties" + log "Downloading from: $wrapperUrl" + + if $cygwin; then + wrapperJarPath=$(cygpath --path --windows "$wrapperJarPath") + fi + + if command -v wget > /dev/null; then + log "Found wget ... using wget" + [ "$MVNW_VERBOSE" = true ] && QUIET="" || QUIET="--quiet" + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget $QUIET "$wrapperUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" + else + wget $QUIET --http-user="$MVNW_USERNAME" --http-password="$MVNW_PASSWORD" "$wrapperUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" + fi + elif command -v curl > /dev/null; then + log "Found curl ... using curl" + [ "$MVNW_VERBOSE" = true ] && QUIET="" || QUIET="--silent" + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl $QUIET -o "$wrapperJarPath" "$wrapperUrl" -f -L || rm -f "$wrapperJarPath" + else + curl $QUIET --user "$MVNW_USERNAME:$MVNW_PASSWORD" -o "$wrapperJarPath" "$wrapperUrl" -f -L || rm -f "$wrapperJarPath" + fi + else + log "Falling back to using Java to download" + javaSource="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/MavenWrapperDownloader.java" + javaClass="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/MavenWrapperDownloader.class" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaSource=$(cygpath --path --windows "$javaSource") + javaClass=$(cygpath --path --windows "$javaClass") + fi + if [ -e "$javaSource" ]; then + if [ ! -e "$javaClass" ]; then + log " - Compiling MavenWrapperDownloader.java ..." + ("$JAVA_HOME/bin/javac" "$javaSource") + fi + if [ -e "$javaClass" ]; then + log " - Running MavenWrapperDownloader.java ..." 
+ ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$wrapperUrl" "$wrapperJarPath") || rm -f "$wrapperJarPath" + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +# If specified, validate the SHA-256 sum of the Maven wrapper jar file +wrapperSha256Sum="" +while IFS="=" read -r key value; do + case "$key" in (wrapperSha256Sum) wrapperSha256Sum=$value; break ;; + esac +done < "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.properties" +if [ -n "$wrapperSha256Sum" ]; then + wrapperSha256Result=false + if command -v sha256sum > /dev/null; then + if echo "$wrapperSha256Sum $wrapperJarPath" | sha256sum -c > /dev/null 2>&1; then + wrapperSha256Result=true + fi + elif command -v shasum > /dev/null; then + if echo "$wrapperSha256Sum $wrapperJarPath" | shasum -a 256 -c > /dev/null 2>&1; then + wrapperSha256Result=true + fi + else + echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." + echo "Please install either command, or disable validation by removing 'wrapperSha256Sum' from your maven-wrapper.properties." + exit 1 + fi + if [ $wrapperSha256Result = false ]; then + echo "Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised." >&2 + echo "Investigate or delete $wrapperJarPath to attempt a clean download." >&2 + echo "If you updated your Maven version, you need to update the specified wrapperSha256Sum property." >&2 + exit 1 + fi +fi + +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$JAVA_HOME" ] && + JAVA_HOME=$(cygpath --path --windows "$JAVA_HOME") + [ -n "$CLASSPATH" ] && + CLASSPATH=$(cygpath --path --windows "$CLASSPATH") + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=$(cygpath --path --windows "$MAVEN_PROJECTBASEDIR") +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. +MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $*" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +# shellcheck disable=SC2086 # safe args +exec "$JAVACMD" \ + $MAVEN_OPTS \ + $MAVEN_DEBUG_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/mvnw.cmd b/mvnw.cmd new file mode 100644 index 0000000..c4586b5 --- /dev/null +++ b/mvnw.cmd @@ -0,0 +1,205 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. 
See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Apache Maven Wrapper startup batch script, version 3.2.0 +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%USERPROFILE%\mavenrc_pre.bat" call "%USERPROFILE%\mavenrc_pre.bat" %* +if exist "%USERPROFILE%\mavenrc_pre.cmd" call "%USERPROFILE%\mavenrc_pre.cmd" %* +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! 
%%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set WRAPPER_URL="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" + +FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET WRAPPER_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET WRAPPER_URL="%MVNW_REPOURL%/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %WRAPPER_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%WRAPPER_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM If specified, validate the SHA-256 sum of the Maven wrapper jar file +SET WRAPPER_SHA_256_SUM="" +FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperSha256Sum" SET WRAPPER_SHA_256_SUM=%%B +) +IF NOT %WRAPPER_SHA_256_SUM%=="" ( + powershell -Command "&{"^ + "$hash = (Get-FileHash \"%WRAPPER_JAR%\" -Algorithm SHA256).Hash.ToLower();"^ + "If('%WRAPPER_SHA_256_SUM%' -ne $hash){"^ + " Write-Output 'Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised.';"^ + " Write-Output 'Investigate or delete %WRAPPER_JAR% to attempt a clean download.';"^ + " Write-Output 'If you updated your Maven version, you need to update the specified wrapperSha256Sum property.';"^ + " exit 1;"^ + "}"^ + "}" + if ERRORLEVEL 1 goto error +) + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% ^ + %JVM_CONFIG_MAVEN_PROPS% ^ + %MAVEN_OPTS% ^ + %MAVEN_DEBUG_OPTS% ^ + -classpath %WRAPPER_JAR% ^ + "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" ^ + %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%"=="" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%USERPROFILE%\mavenrc_post.bat" call "%USERPROFILE%\mavenrc_post.bat" +if exist "%USERPROFILE%\mavenrc_post.cmd" call "%USERPROFILE%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%"=="on" pause + +if "%MAVEN_TERMINATE_CMD%"=="on" exit %ERROR_CODE% + +cmd /C exit /B %ERROR_CODE% diff --git a/pom.xml b/pom.xml index 923576c..5755d25 100644 --- a/pom.xml +++ b/pom.xml @@ -2,22 +2,26 @@ 4.0.0 - io.kptfh.nosql + com.playtika.nosql batch-updater-parent - 0.0.1 + 0.0.22 pom batch-updater + reactor-batch-updater + aerospike-batch-updater + aerospike-reactor-batch-updater + aerospike-container nosql-batch-updater Batch updates on NoSql DBs - https://github.com/kptfh/nosql-batch-updater + https://github.com/PlaytikaOSS/nosql-batch-updater Github - https://github.com/kptfh/nosql-batch-updater/issues + https://github.com/PlaytikaOSS/nosql-batch-updater/issues @@ -28,6 +32,13 @@ + + https://svn.apache.org/viewvc/maven + scm:git:git://github.com/Playtika/nosql-batch-updater.git + scm:git:git@github.com:Playtika/nosql-batch-updater.git + HEAD + + kptfh @@ -36,31 +47,56 @@ + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + + ossrh + https://oss.sonatype.org/service/local/staging/deploy/maven2/ + + + - 1.8 + 17 ${java.version} ${java.version} UTF-8 UTF-8 + 6.2.0 + 6.1.2 1.7.25 + 3.2.12.RELEASE + + 1.19.1 4.12 3.9.0 + 1.0.8.RELEASE - 1.8.0-beta0 - 1.3 - 1.9.5 2.11.1 + 3.1.6 - 0.7.7.201606060606 - 4.1.0 - - 3.5.1 - 2.19.1 - 2.3 + 3.11.0 + 3.2.1 + 3.3.0 + 3.3.0 + 3.6.0 + 3.3.0 + 1.6.13 + + 3.1.0 + 4.5.1 + + + 3EEF24C7 + false + true + never @@ -68,11 +104,43 @@ - io.kptfh.nosql + io.projectreactor + reactor-core + ${reactor-core.version} + + + + com.aerospike + aerospike-client + ${aerospike-client.version} + provided + + + + com.aerospike + aerospike-reactor-client + ${aerospike-reactor-client.version} + provided + + + + com.playtika.nosql batch-updater ${project.version} + + com.playtika.nosql + aerospike-batch-updater + ${project.version} + + + + com.playtika.nosql + reactor-batch-updater + ${project.version} + + org.slf4j slf4j-api @@ -81,6 +149,43 @@ + + com.playtika.nosql + batch-updater + ${project.version} + test-jar + test + + + + com.playtika.nosql + aerospike-batch-updater + ${project.version} + test-jar + test + + + + com.playtika.nosql + reactor-batch-updater + ${project.version} + test-jar + test + + + + com.playtika.nosql + aerospike-container + ${project.version} + test + + + + org.testcontainers + testcontainers + ${testcontainers.version} + + junit junit @@ -103,24 +208,37 @@ - org.apache.logging.log4j - log4j-api - ${log4j.version} + org.awaitility + awaitility + ${awaitility.version} + test + + + + + io.projectreactor.tools + blockhound + ${blockhound.version} test - - - - org.springframework.boot - spring-boot-maven-plugin + org.apache.maven.plugins + maven-jar-plugin + ${maven-jar-plugin.version} + + + + test-jar + + + @@ -138,19 +256,16 @@ org.apache.maven.plugins maven-surefire-plugin 
${maven-surefire-plugin.version} - - - - - org.jacoco - jacoco-maven-plugin - ${jacoco-plugin.version} + + -XX:+AllowRedefinitionToAddDeleteMethods + org.apache.maven.plugins maven-source-plugin + ${maven-source-plugin.version} attach-sources @@ -161,17 +276,11 @@ - - - org.eluder.coveralls - coveralls-maven-plugin - ${coveralls-plugin.version} - - org.apache.maven.plugins maven-javadoc-plugin + ${maven.javadoc.plugin.version} attach-javadocs @@ -180,65 +289,93 @@ + + 17 + none + false + + + + + io.github.gitflow-incremental-builder + gitflow-incremental-builder + ${gitflow-incremental-builder.version} + true + + develop + true + refs/remotes/origin/develop + always + true + impacted + \.github[/\\].*|\.mvn[/\\].*|mvnw.* + + (.*[/\\])?README.adoc + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - maven-release-plugin - 2.4.1 - - false - release - true - + org.apache.maven.plugins + maven-compiler-plugin - - - - - - bintray-kptfh-feign-reactive - kptfh-feign-reactive - https://api.bintray.com/maven/kptfh/json-reactive/json/;publish=1 - - + + org.apache.maven.plugins + maven-javadoc-plugin + + + + org.apache.maven.plugins + maven-source-plugin + + + + org.apache.maven.plugins + maven-surefire-plugin + + + io.github.gitflow-incremental-builder + gitflow-incremental-builder + + + + + + ossrh + + + + org.sonatype.plugins + nexus-staging-maven-plugin + ${nexus-staging-maven-plugin.version} + true + + ossrh + https://oss.sonatype.org/ + true + + + + org.apache.maven.plugins + maven-gpg-plugin + ${maven-gpg-plugin.version} + + + sign-artifacts + verify + + sign + + + + + + + + diff --git a/reactor-batch-updater/pom.xml b/reactor-batch-updater/pom.xml new file mode 100644 index 0000000..c971804 --- /dev/null +++ b/reactor-batch-updater/pom.xml @@ -0,0 +1,80 @@ + + + + 4.0.0 + + + com.playtika.nosql + batch-updater-parent + 0.0.22 + + + reactor-batch-updater + jar + Reactor batch updates on NoSql DBs + + + + + com.playtika.nosql + batch-updater + + + + io.projectreactor + reactor-core + + + + org.slf4j + slf4j-api + + + + + + com.playtika.nosql + batch-updater + test-jar + test + + + + junit + junit + test + + + + org.assertj + assertj-core + test + + + + org.apache.logging.log4j + log4j-slf4j-impl + test + + + + org.awaitility + awaitility + test + + + \ No newline at end of file diff --git a/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/ReactorBatchOperations.java b/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/ReactorBatchOperations.java new file mode 100644 index 0000000..cbea78b --- /dev/null +++ b/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/ReactorBatchOperations.java @@ -0,0 +1,73 @@ +package nosql.batch.update.reactor; + +import nosql.batch.update.BatchUpdate; +import nosql.batch.update.lock.Lock; +import nosql.batch.update.lock.LockingException; +import nosql.batch.update.reactor.lock.ReactorLockOperations; +import nosql.batch.update.reactor.wal.ReactorWriteAheadLogManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.util.Collection; + +public class ReactorBatchOperations { + + private static final Logger logger = LoggerFactory.getLogger(ReactorBatchOperations.class); + + private final ReactorWriteAheadLogManager writeAheadLogManager; + private final ReactorLockOperations lockOperations; + private final ReactorUpdateOperations updateOperations; + + public ReactorBatchOperations(ReactorWriteAheadLogManager writeAheadLogManager, + ReactorLockOperations 
lockOperations, + ReactorUpdateOperations updateOperations) { + this.writeAheadLogManager = writeAheadLogManager; + this.lockOperations = lockOperations; + this.updateOperations = updateOperations; + } + + public Mono processAndDeleteTransaction(BATCH_ID batchId, BatchUpdate batchUpdate, boolean calledByWal) { + return lockOperations.acquire(batchId, batchUpdate.locks(), calledByWal) + /*.onErrorResume(throwable -> onErrorCleaner.apply(batchLocks) + .then(Mono.error(throwable)))*/ + .doOnError(LockingException.class, throwable -> { + if(logger.isTraceEnabled()){ + logger.trace("Failed to acquire locks [{}] batchId=[{}]. Will release locks", batchId, batchUpdate.locks()); + } + releaseLocksAndDeleteWalTransactionOnError(batchUpdate.locks(), batchId) + .subscribe(); + }) + .flatMap(locked -> updateOperations.updateMany(batchUpdate.updates(), calledByWal) + .doOnSuccess(unused -> { + if(logger.isTraceEnabled()){ + logger.trace("Applied updates [{}] batchId=[{}]", batchId, batchUpdate); + } + }) + .then(releaseLocksAndDeleteWalTransaction(locked, batchId))); + } + + private Mono releaseLocksAndDeleteWalTransaction(Collection locks, BATCH_ID batchId) { + return lockOperations.release(locks, batchId) + //here we use fire&forget to reduce response time + .doOnSuccess(aVoid -> writeAheadLogManager.deleteBatch(batchId) + .doOnNext(deleted -> { + if(deleted) { + logger.trace("Removed batch from WAL: {}", batchId); + } else { + logger.error("Missed batch in WAL: {}", batchId); + } + }) + .subscribe()); + } + + public Mono releaseLocksAndDeleteWalTransactionOnError(LOCKS locks, BATCH_ID batchId) { + return lockOperations.getLockedByBatchUpdate(locks, batchId) + .flatMap(transactionLockKeys -> releaseLocksAndDeleteWalTransaction(transactionLockKeys, batchId)); + } + + public ReactorWriteAheadLogManager getWriteAheadLogManager() { + return writeAheadLogManager; + } + +} diff --git a/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/ReactorBatchUpdater.java b/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/ReactorBatchUpdater.java new file mode 100644 index 0000000..bc2c172 --- /dev/null +++ b/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/ReactorBatchUpdater.java @@ -0,0 +1,47 @@ +package nosql.batch.update.reactor; + +import nosql.batch.update.BatchUpdate; +import nosql.batch.update.lock.Lock; +import nosql.batch.update.reactor.wal.ReactorWriteAheadLogManager; +import reactor.core.publisher.Mono; + +/** + * Used to run batch updates on NoSql storage. Initially it was developed for Aerospike but may be implemented for any. + * Updates should be idempotent so WriteAheadLogCompleter can safely complete interrupted batch + * There is 2 approaches in batch updates PRE_LOCK and POST_LOCK + * + * PRE_LOCK - used if you know in advance all records (keys) that should be updated + * It takes the following steps to complete batch update + * 1) Lock keys + * 2) Apply updates + * 3) Unlock keys + * + * POST_LOCK - used if you don't know in advance all records (keys) that should be updated. 
+ * It takes the following steps to complete batch update + * 1) Prepare updates + * 2) Lock keys + * 3) Check expected values (to guarantee that no concurrent changes while running updates and acquiring locks) + * 4) Apply updates + * 5) Unlock keys + * + * @param + * @param + * @param + * @param + */ +public class ReactorBatchUpdater { + + private final ReactorWriteAheadLogManager writeAheadLogManager; + private final ReactorBatchOperations batchOperations; + + public ReactorBatchUpdater(ReactorBatchOperations batchOperations) { + this.batchOperations = batchOperations; + this.writeAheadLogManager = batchOperations.getWriteAheadLogManager(); + } + + public Mono update(BatchUpdate batchUpdate) { + return writeAheadLogManager.writeBatch(batchUpdate) + .flatMap(batchId -> batchOperations.processAndDeleteTransaction(batchId, batchUpdate, false)); + } + +} diff --git a/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/ReactorUpdateOperations.java b/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/ReactorUpdateOperations.java new file mode 100644 index 0000000..cc4e402 --- /dev/null +++ b/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/ReactorUpdateOperations.java @@ -0,0 +1,8 @@ +package nosql.batch.update.reactor; + +import reactor.core.publisher.Mono; + +public interface ReactorUpdateOperations { + + Mono updateMany(UPDATES batchOfUpdates, boolean calledByWal); +} diff --git a/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/lock/ReactorLockOperations.java b/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/lock/ReactorLockOperations.java new file mode 100644 index 0000000..741d34f --- /dev/null +++ b/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/lock/ReactorLockOperations.java @@ -0,0 +1,26 @@ +package nosql.batch.update.reactor.lock; + +import nosql.batch.update.lock.Lock; +import nosql.batch.update.lock.LockingException; +import reactor.core.publisher.Mono; + +import java.util.Collection; +import java.util.List; + +public interface ReactorLockOperations { + + /** + * + * @param batchId + * @param locks + * @param checkBatchId + * + * @return + */ + Mono> acquire(BATCH_ID batchId, + LOCKS locks, boolean checkBatchId) throws LockingException; + + Mono> getLockedByBatchUpdate(LOCKS locks, BATCH_ID batchId); + + Mono release(Collection locks, BATCH_ID batchId); +} diff --git a/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/wal/ReactorWriteAheadLogCompleter.java b/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/wal/ReactorWriteAheadLogCompleter.java new file mode 100644 index 0000000..686df93 --- /dev/null +++ b/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/wal/ReactorWriteAheadLogCompleter.java @@ -0,0 +1,60 @@ +package nosql.batch.update.reactor.wal; + +import nosql.batch.update.lock.Lock; +import nosql.batch.update.reactor.ReactorBatchOperations; +import nosql.batch.update.wal.AbstractWriteAheadLogCompleter; +import nosql.batch.update.wal.ExclusiveLocker; +import nosql.batch.update.wal.WalRecord; +import nosql.batch.update.wal.WalTimeRange; + +import java.time.Duration; +import java.util.List; +import java.util.concurrent.ScheduledExecutorService; + +/** + * Completes hanged transactions + */ +public class ReactorWriteAheadLogCompleter + extends AbstractWriteAheadLogCompleter { + + private final ReactorWriteAheadLogManager writeAheadLogManager; + private final ReactorBatchOperations batchOperations; + + /** + * @param batchOperations + * @param 
staleBatchesThreshold + * @param exclusiveLocker + * @param scheduledExecutorService + */ + public ReactorWriteAheadLogCompleter(ReactorBatchOperations batchOperations, + Duration staleBatchesThreshold, + int batchSize, + ExclusiveLocker exclusiveLocker, ScheduledExecutorService scheduledExecutorService){ + super(staleBatchesThreshold, batchSize, exclusiveLocker, scheduledExecutorService); + this.writeAheadLogManager = batchOperations.getWriteAheadLogManager(); + this.batchOperations = batchOperations; + } + + @Override + protected void releaseLocksAndDeleteWalTransactionOnError(WalRecord batch) { + batchOperations.releaseLocksAndDeleteWalTransactionOnError( + batch.batchUpdate.locks(), batch.batchId).block(); + } + + @Override + protected void processAndDeleteTransactions(WalRecord batch) { + batchOperations.processAndDeleteTransaction( + batch.batchId, batch.batchUpdate, true).block(); + } + + @Override + protected List getTimeRanges(Duration staleBatchesThreshold, int batchSize) { + return writeAheadLogManager.getTimeRanges(staleBatchesThreshold, batchSize); + } + + @Override + protected List> getStaleBatchesForRange(WalTimeRange timeRange) { + return writeAheadLogManager.getStaleBatchesForRange(timeRange); + } + +} diff --git a/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/wal/ReactorWriteAheadLogManager.java b/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/wal/ReactorWriteAheadLogManager.java new file mode 100644 index 0000000..610b8b2 --- /dev/null +++ b/reactor-batch-updater/src/main/java/nosql/batch/update/reactor/wal/ReactorWriteAheadLogManager.java @@ -0,0 +1,22 @@ +package nosql.batch.update.reactor.wal; + + +import nosql.batch.update.BatchUpdate; +import nosql.batch.update.wal.WalRecord; +import nosql.batch.update.wal.WalTimeRange; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.List; + +public interface ReactorWriteAheadLogManager { + + Mono writeBatch(BatchUpdate batch); + + Mono deleteBatch(BATCH_ID batchId); + + List getTimeRanges(Duration staleThreshold, int batchSize); + + List> getStaleBatchesForRange(WalTimeRange timeRange); + +} diff --git a/reactor-batch-updater/src/test/java/nosql/batch/update/ReactorFailingUpdateOperations.java b/reactor-batch-updater/src/test/java/nosql/batch/update/ReactorFailingUpdateOperations.java new file mode 100644 index 0000000..7d642ee --- /dev/null +++ b/reactor-batch-updater/src/test/java/nosql/batch/update/ReactorFailingUpdateOperations.java @@ -0,0 +1,32 @@ +package nosql.batch.update; + +import nosql.batch.update.reactor.ReactorUpdateOperations; +import reactor.core.publisher.Mono; + +import java.util.concurrent.atomic.AtomicBoolean; + +abstract public class ReactorFailingUpdateOperations implements ReactorUpdateOperations { + + private final ReactorUpdateOperations updateOperations; + private final AtomicBoolean failsUpdate; + + public ReactorFailingUpdateOperations(ReactorUpdateOperations updateOperations, AtomicBoolean failsUpdate) { + this.updateOperations = updateOperations; + this.failsUpdate = failsUpdate; + } + + abstract protected UPDATES selectFlakingToUpdate(UPDATES batchOfUpdates); + + @Override + public Mono updateMany(UPDATES batchOfUpdates, boolean calledByWal) { + if(failsUpdate.get()){ + UPDATES partialUpdate = selectFlakingToUpdate(batchOfUpdates); + return updateOperations.updateMany(partialUpdate, calledByWal) + .then(Mono.error(new RuntimeException())); + } + else { + return updateOperations.updateMany(batchOfUpdates, calledByWal); + } + } + +} 
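For orientation, here is a minimal wiring sketch for the reactor classes introduced above. It assumes storage-specific implementations of ReactorWriteAheadLogManager, ReactorLockOperations and ReactorUpdateOperations already exist, and the generic parameter order shown is an assumption made for illustration, not something defined by this diff.

// Sketch only; imports assumed: nosql.batch.update.BatchUpdate, nosql.batch.update.lock.Lock,
// nosql.batch.update.reactor.*, reactor.core.publisher.Mono
static <LOCKS, UPDATES, L extends Lock, BATCH_ID> void updateBatch(
        ReactorWriteAheadLogManager<LOCKS, UPDATES, BATCH_ID> walManager,
        ReactorLockOperations<LOCKS, L, BATCH_ID> lockOperations,
        ReactorUpdateOperations<UPDATES> updateOperations,
        BatchUpdate<LOCKS, UPDATES> batchUpdate) {
    // ReactorBatchOperations coordinates WAL, locks and updates;
    // ReactorBatchUpdater adds the "write the WAL record first" step.
    ReactorBatchOperations<LOCKS, UPDATES, L, BATCH_ID> operations =
            new ReactorBatchOperations<>(walManager, lockOperations, updateOperations);
    ReactorBatchUpdater<LOCKS, UPDATES, L, BATCH_ID> updater = new ReactorBatchUpdater<>(operations);
    // PRE_LOCK sequence: write the WAL record, acquire locks, apply updates,
    // release locks, then delete the WAL record (fire and forget).
    updater.update(batchUpdate).block(); // or subscribe() in a fully reactive caller
}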
diff --git a/reactor-batch-updater/src/test/java/nosql/batch/update/ReactorHangingUpdateOperations.java b/reactor-batch-updater/src/test/java/nosql/batch/update/ReactorHangingUpdateOperations.java new file mode 100644 index 0000000..5551fd7 --- /dev/null +++ b/reactor-batch-updater/src/test/java/nosql/batch/update/ReactorHangingUpdateOperations.java @@ -0,0 +1,34 @@ +package nosql.batch.update; + +import nosql.batch.update.reactor.ReactorUpdateOperations; +import reactor.core.publisher.Mono; + +import java.util.concurrent.atomic.AtomicBoolean; + +import static nosql.batch.update.reactor.util.ReactorHangingUtil.hang; + +abstract public class ReactorHangingUpdateOperations implements ReactorUpdateOperations { + + private final ReactorUpdateOperations updateOperations; + private final AtomicBoolean hangUpdate; + + public ReactorHangingUpdateOperations(ReactorUpdateOperations updateOperations, AtomicBoolean hangUpdate) { + this.updateOperations = updateOperations; + this.hangUpdate = hangUpdate; + } + + abstract protected UPDATES selectFlakingToUpdate(UPDATES batchOfUpdates); + + @Override + public Mono updateMany(UPDATES batchOfUpdates, boolean calledByWal) { + if(hangUpdate.get()){ + UPDATES partialUpdate = selectFlakingToUpdate(batchOfUpdates); + return updateOperations.updateMany(partialUpdate, calledByWal) + .then(hang()); + } + else { + return updateOperations.updateMany(batchOfUpdates, calledByWal); + } + } + +} diff --git a/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/lock/ReactorHangingLockOperations.java b/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/lock/ReactorHangingLockOperations.java new file mode 100644 index 0000000..74445af --- /dev/null +++ b/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/lock/ReactorHangingLockOperations.java @@ -0,0 +1,57 @@ +package nosql.batch.update.reactor.lock; + +import nosql.batch.update.lock.Lock; +import nosql.batch.update.lock.LockingException; +import reactor.core.publisher.Mono; + +import java.util.Collection; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static nosql.batch.update.reactor.util.ReactorHangingUtil.hang; + +abstract public class ReactorHangingLockOperations implements ReactorLockOperations { + + private final ReactorLockOperations lockOperations; + private final AtomicBoolean failsAcquire; + private final AtomicBoolean failsRelease; + + public ReactorHangingLockOperations(ReactorLockOperations lockOperations, + AtomicBoolean failsAcquire, AtomicBoolean failsRelease) { + this.lockOperations = lockOperations; + this.failsAcquire = failsAcquire; + this.failsRelease = failsRelease; + } + + abstract protected LOCKS selectFlakingToAcquire(LOCKS locks); + abstract protected Collection selectFlakingToRelease(Collection locks); + + @Override + public Mono> acquire(BATCH_ID batchId, LOCKS locks, boolean checkTransactionId) throws LockingException { + if(failsAcquire.get()) { + LOCKS partialLocks = selectFlakingToAcquire(locks); + + return lockOperations.acquire(batchId, partialLocks, checkTransactionId) + .then(hang()); + } else { + return lockOperations.acquire(batchId, locks, checkTransactionId); + } + } + + @Override + public Mono> getLockedByBatchUpdate(LOCKS locks, BATCH_ID batchId) { + return lockOperations.getLockedByBatchUpdate(locks, batchId); + } + + @Override + public Mono release(Collection locks, BATCH_ID batchId) { + if(failsRelease.get()){ + Collection partialLocks = selectFlakingToRelease(locks); + return 
lockOperations.release(partialLocks, batchId) + .then(hang()); + } else { + return lockOperations.release(locks, batchId); + } + } + +} \ No newline at end of file diff --git a/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/lock/ReactorLockOperationsTest.java b/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/lock/ReactorLockOperationsTest.java new file mode 100644 index 0000000..b0ecf55 --- /dev/null +++ b/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/lock/ReactorLockOperationsTest.java @@ -0,0 +1,69 @@ +package nosql.batch.update.reactor.lock; + +import nosql.batch.update.lock.Lock; +import nosql.batch.update.lock.TemporaryLockingException; +import org.junit.Test; + +import java.util.List; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +abstract public class ReactorLockOperationsTest { + + private final ReactorLockOperations lockOperations; + + public ReactorLockOperationsTest(ReactorLockOperations lockOperations) { + this.lockOperations = lockOperations; + } + + abstract protected LOCKS getLocks1(); + abstract protected BATCH_ID generateBatchId(); + abstract protected void assertThatSameLockKeys(List locks1, List locks2); + + @Test + public void shouldNotLockLocked(){ + BATCH_ID batchId1 = generateBatchId(); + List acquiredLocks = lockOperations.acquire(batchId1, getLocks1(), false).block(); + assertThat(acquiredLocks).isNotEmpty(); + assertThat(acquiredLocks.stream().map(l -> l.lockType).collect(Collectors.toSet())) + .containsExactly(Lock.LockType.LOCKED); + + assertThatThrownBy(() -> lockOperations.acquire(generateBatchId(), getLocks1(), false).block()) + .isInstanceOf(TemporaryLockingException.class); + + lockOperations.release(acquiredLocks, batchId1).block(); + + List acquiredLocks1 = lockOperations.acquire(batchId1, getLocks1(), false).block(); + assertThat(acquiredLocks1).containsExactlyInAnyOrderElementsOf(acquiredLocks); + } + + @Test + public void shouldLockLockedForSameBatch(){ + BATCH_ID batchId1 = generateBatchId(); + List acquiredLocks = lockOperations.acquire(batchId1, getLocks1(), false).block(); + assertThat(acquiredLocks).isNotEmpty(); + assertThat(acquiredLocks.stream().map(l -> l.lockType).collect(Collectors.toSet())) + .containsExactly(Lock.LockType.LOCKED); + + List acquiredLocks1 = lockOperations.acquire(batchId1, getLocks1(), true).block(); + + assertThatSameLockKeys(acquiredLocks1, acquiredLocks); + assertThat(acquiredLocks1.stream().map(l -> l.lockType).collect(Collectors.toSet())) + .containsExactly(Lock.LockType.SAME_BATCH); + } + + @Test + public void shouldReturnLocked(){ + BATCH_ID batchId1 = generateBatchId(); + List acquiredLocks = lockOperations.acquire(batchId1, getLocks1(), false).block(); + + List lockedLocks = lockOperations.getLockedByBatchUpdate(getLocks1(), batchId1).block(); + + assertThatSameLockKeys(lockedLocks, acquiredLocks); + assertThat(lockedLocks.stream().map(l -> l.lockType).collect(Collectors.toSet())) + .containsExactly(Lock.LockType.SAME_BATCH); + } + +} diff --git a/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/util/ReactorHangingUtil.java b/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/util/ReactorHangingUtil.java new file mode 100644 index 0000000..b055100 --- /dev/null +++ b/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/util/ReactorHangingUtil.java @@ -0,0 +1,18 @@ +package nosql.batch.update.reactor.util; + 
+import reactor.core.publisher.Mono; +import reactor.core.scheduler.Schedulers; + +import java.util.concurrent.CompletableFuture; + +import static nosql.batch.update.util.HangingUtil.hanged; + +public class ReactorHangingUtil { + + public static Mono hang() { + return Mono.defer(() -> { + hanged.set(true); + return Mono.fromFuture(new CompletableFuture()); + }).publishOn(Schedulers.elastic()); + } +} diff --git a/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/wal/ReactorFailingWriteAheadLogManager.java b/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/wal/ReactorFailingWriteAheadLogManager.java new file mode 100644 index 0000000..6891b6b --- /dev/null +++ b/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/wal/ReactorFailingWriteAheadLogManager.java @@ -0,0 +1,62 @@ +package nosql.batch.update.reactor.wal; + +import nosql.batch.update.BatchUpdate; +import nosql.batch.update.wal.WalRecord; +import nosql.batch.update.wal.WalTimeRange; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Schedulers; + +import java.time.Duration; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +public class ReactorFailingWriteAheadLogManager implements ReactorWriteAheadLogManager { + + private static final Logger logger = LoggerFactory.getLogger(ReactorFailingWriteAheadLogManager.class); + + private final ReactorWriteAheadLogManager writeAheadLogManager; + private final AtomicBoolean failsDelete; + + private final AtomicInteger deletesInProcess; + + public ReactorFailingWriteAheadLogManager(ReactorWriteAheadLogManager writeAheadLogManager, + AtomicBoolean failsDelete, AtomicInteger deletesInProcess) { + this.writeAheadLogManager = writeAheadLogManager; + this.failsDelete = failsDelete; + this.deletesInProcess = deletesInProcess; + } + + @Override + public Mono writeBatch(BatchUpdate batch) { + return writeAheadLogManager.writeBatch(batch); + } + + @Override + public Mono deleteBatch(BATCH_ID batchId) { + if(failsDelete.get()){ + return Mono.defer(() -> { + logger.error("deleteBatch failed flaking for batchId [{}]", batchId); + return Mono.error(new RuntimeException()) + .publishOn(Schedulers.elastic()); + }); + } else { + deletesInProcess.incrementAndGet(); + return writeAheadLogManager.deleteBatch(batchId) + .doOnSuccess(aVoid -> deletesInProcess.decrementAndGet()); + } + } + + @Override + public List getTimeRanges(Duration staleThreshold, int batchSize) { + return writeAheadLogManager.getTimeRanges(staleThreshold, batchSize); + } + + @Override + public List> getStaleBatchesForRange(WalTimeRange timeRange) { + return writeAheadLogManager.getStaleBatchesForRange(timeRange); + } + +} diff --git a/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/wal/ReactorHangingWriteAheadLogManager.java b/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/wal/ReactorHangingWriteAheadLogManager.java new file mode 100644 index 0000000..e83f789 --- /dev/null +++ b/reactor-batch-updater/src/test/java/nosql/batch/update/reactor/wal/ReactorHangingWriteAheadLogManager.java @@ -0,0 +1,53 @@ +package nosql.batch.update.reactor.wal; + +import nosql.batch.update.BatchUpdate; +import nosql.batch.update.wal.WalRecord; +import nosql.batch.update.wal.WalTimeRange; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.List; +import 
java.util.concurrent.atomic.AtomicBoolean; + +import static nosql.batch.update.reactor.util.ReactorHangingUtil.hang; + +public class ReactorHangingWriteAheadLogManager implements ReactorWriteAheadLogManager { + + private static Logger logger = LoggerFactory.getLogger(ReactorHangingWriteAheadLogManager.class); + + private final ReactorWriteAheadLogManager writeAheadLogManager; + private final AtomicBoolean failsDelete; + + public ReactorHangingWriteAheadLogManager(ReactorWriteAheadLogManager writeAheadLogManager, AtomicBoolean failsDelete) { + this.writeAheadLogManager = writeAheadLogManager; + this.failsDelete = failsDelete; + } + + @Override + public Mono writeBatch(BatchUpdate batch) { + return writeAheadLogManager.writeBatch(batch); + } + + @Override + public Mono deleteBatch(BATCH_ID batchId) { + if(failsDelete.get()){ + logger.error("deleteBatch failed hanging for batchId [{}]", batchId); + return hang(); + } else { + return writeAheadLogManager.deleteBatch(batchId); + } + } + + @Override + public List getTimeRanges(Duration staleThreshold, int batchSize) { + return writeAheadLogManager.getTimeRanges(staleThreshold, batchSize); + } + + @Override + public List> getStaleBatchesForRange(WalTimeRange timeRange) { + return writeAheadLogManager.getStaleBatchesForRange(timeRange); + } + +}
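As a usage note, the hanging and failing WAL managers above plug into the same consistency loop as the abstract shouldBecameConsistentAfterHangAndCompletion test in batch-updater: break one operation, run an update that gets stuck, repair the fake, then let the WriteAheadLogCompleter finish the stale batch. A condensed sketch, where walManager, updater, batchUpdate, runCompleter and checkForConsistency are placeholders for the concrete test fixture members:

// Sketch only; assumes org.awaitility.Awaitility.await and nosql.batch.update.util.HangingUtil
AtomicBoolean hangsDeleteBatchInWal = new AtomicBoolean(false);
ReactorWriteAheadLogManager<LOCKS, UPDATES, BATCH_ID> hangingWal =
        new ReactorHangingWriteAheadLogManager<>(walManager, hangsDeleteBatchInWal);
// the updater under test is built on top of hangingWal

hangsDeleteBatchInWal.set(true);                          // break WAL deletion
new Thread(() -> updater.update(batchUpdate).block()).start();
await().until(HangingUtil.hanged::get);                   // wait until the broken operation has hung
hangsDeleteBatchInWal.set(false);                         // repair the fake

runCompleter();            // WriteAheadLogCompleter picks up and completes the stale batch
checkForConsistency();     // storage state must be consistent again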