diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 29a597bc13e..00000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,174 +0,0 @@ -version: 2.1 - -orbs: - slack: circleci/slack@4.2.1 - build-tools: circleci/build-tools@2.9.0 - -jobs: - test: - parameters: - machine-size: - type: string - default: large - build-type: - type: string - build-mysql: - type: string - default: "" - build-mariadb: - type: string - default: "" - build-postgresql: - type: string - default: "" - machine: - image: ubuntu-2004:202010-01 - resource_class: << parameters.machine-size >> - steps: - - run: - name: Halt builds except SBT test suite if there is no PR associated with the commit - command: | - if [[ -z "${CI_PULL_REQUEST}" ]] && [[ "${BUILD_TYPE}" != "sbt" ]] ; then - circleci-agent step halt - fi - - checkout - - run: - name: Custom step - configure GIT identity - command: | - git config user.email "circleci@example.com" - git config user.name "CircleCI" - - build-tools/merge-with-parent: - parent: develop - - restore_cache: - key: sbt-cache - - run: - command: src/ci/bin/test.sh - no_output_timeout: 1h - - run: - name: Do tricks to avoid unnecessary cache updates - command: | - find ~/.ivy2/cache -name "ivydata-*.properties" -print -delete - find ~/.sbt -name "*.lock" -print -delete - - store_test_results: - path: target/test-reports - - save_cache: - key: sbt-cache - paths: - - "~/.ivy2/cache" - - "~/.sbt" - environment: - CIRCLE_COMMIT_RANGE: << pipeline.git.base_revision >>...<< pipeline.git.revision >> - BUILD_TYPE: << parameters.build-type >> - BUILD_MYSQL: << parameters.build-mysql >> - BUILD_MARIADB: << parameters.build-mariadb >> - BUILD_POSTGRESQL: << parameters.build-postgresql >> - -workflows: - all-tests: - jobs: - - test: - name: testSbt - build-type: "sbt" - - test: - name: testSingleWorkflowRunner - build-type: "singleWorkflowRunner" - - test: - name: testDbms - build-type: "dbms" - - test: - name: testHoricromtalDeadlock - build-type: "horicromtalDeadlock" - - test: - name: testDockerScripts - build-type: "dockerScripts" - - test: - name: testReferenceDiskManifestBuilderApp - build-type: "referenceDiskManifestBuilderApp" - - test: - name: testCentaurAws - build-type: "centaurAws" - build-mysql: "5.7" - - test: - name: testCentaurDummy - build-type: "centaurDummy" - build-mysql: "5.7" - - test: - name: testCentaurEngineUpgradeLocal - build-type: "centaurEngineUpgradeLocal" - build-mysql: "5.7" - - test: - name: testCentaurEngineUpgradePapiV2alpha1 - build-type: "centaurEngineUpgradePapiV2alpha1" - build-mysql: "5.7" - - test: - name: testCentaurHoricromtalPapiV2alpha1 - build-type: "centaurHoricromtalPapiV2alpha1" - build-mysql: "5.7" - - test: - name: testCentaurHoricromtalPapiV2beta-MySQL - build-type: "centaurHoricromtalPapiV2beta" - build-mysql: "5.7" - - test: - name: testCentaurHoricromtalPapiV2beta-MariaDB - build-type: "centaurHoricromtalPapiV2beta" - build-mariadb: "10.3" - - test: - name: testCentaurHoricromtalEngineUpgradePapiV2alpha1-MySQL - build-type: "centaurHoricromtalEngineUpgradePapiV2alpha1" - build-mysql: "5.7" - - test: - name: testCentaurHoricromtalEngineUpgradePapiV2alpha1-MariaDB - build-type: "centaurHoricromtalEngineUpgradePapiV2alpha1" - build-mariadb: "10.3" - - test: - name: testCentaurPapiUpgradePapiV2alpha1 - build-type: "centaurPapiUpgradePapiV2alpha1" - build-mysql: "5.7" - - test: - name: testCentaurPapiUpgradeNewWorkflowsPapiV2alpha1 - build-type: "centaurPapiUpgradeNewWorkflowsPapiV2alpha1" - build-mysql: "5.7" - - 
test: - name: testCentaurLocal-MySQL - build-type: "centaurLocal" - build-mysql: "5.7" - - test: - name: testCentaurLocal-Postgresql - build-type: "centaurLocal" - build-postgresql: "11.3" - - test: - name: testCentaurPapiV2alpha1 - build-type: "centaurPapiV2alpha1" - build-mysql: "5.7" - - test: - name: testCentaurPapiV2beta - build-type: "centaurPapiV2beta" - build-mysql: "5.7" - - test: - name: testCentaurSlurm - build-type: "centaurSlurm" - build-mysql: "5.7" - - test: - name: testCentaurTes - build-type: "centaurTes" - build-mysql: "5.7" - - test: - name: testCentaurWdlUpgradeLocal - build-type: "centaurWdlUpgradeLocal" - build-mysql: "5.7" - - test: - name: testCheckPublish - build-type: "checkPublish" - build-mysql: "5.7" - - test: - name: testConformanceLocal - build-type: "conformanceLocal" - build-mysql: "5.7" - - test: - name: testConformancePapiV2beta - build-type: "conformancePapiV2beta" - build-mysql: "5.7" - - test: - name: testConformanceTesk - build-type: "conformanceTesk" - build-mysql: "5.7" diff --git a/.github/workflows/chart_update_on_merge.yml b/.github/workflows/chart_update_on_merge.yml index aa7a2c9c9ad..a2b14f2ec65 100644 --- a/.github/workflows/chart_update_on_merge.yml +++ b/.github/workflows/chart_update_on_merge.yml @@ -9,8 +9,14 @@ jobs: chart-update: name: Cromwhelm Chart Auto Updater if: github.event.pull_request.merged == true - runs-on: self-hosted # Faster machines; see https://github.com/broadinstitute/cromwell/settings/actions/runners + runs-on: ubuntu-latest steps: + - name: Fetch Jira ID from the PR title + id: fetch-jira-id + run: | + JIRA_ID=$(echo '${{ github.event.pull_request.title }}' | grep -Eo '[A-Z][A-Z]+-[0-9]+' | xargs echo -n | tr '[:space:]' ',') + [[ -z "$JIRA_ID" ]] && { echo "No Jira ID found in PR title" ; exit 1; } + echo "JIRA_ID=$JIRA_ID" >> $GITHUB_OUTPUT - name: Clone Cromwell uses: actions/checkout@v2 with: @@ -49,6 +55,7 @@ jobs: username: dsdejenkins password: ${{ secrets.DSDEJENKINS_PASSWORD }} # Build & push `cromwell`, `womtool`, `cromiam`, and `cromwell-drs-localizer` + # This step is validated in the GHA 'docker_build_test.yml' without the accompanying docker push - name: Build Cromwell Docker run: | set -e @@ -68,7 +75,7 @@ jobs: repository: broadinstitute/terra-helmfile event-type: update-service client-payload: '{"service": "cromiam", "version": "${{ env.CROMWELL_VERSION }}", "dev_only": false}' - - name: Edit & push chart + - name: Edit & push cromwhelm chart env: BROADBOT_GITHUB_TOKEN: ${{ secrets.BROADBOT_GITHUB_TOKEN }} run: | @@ -76,10 +83,53 @@ jobs: cd cromwhelm git checkout main ls -la - sed -i "s/appVersion.*/appVersion: \"$CROMWELL_VERSION\"/" cromwell-helm/Chart.yaml - sed -i "s/image: broadinstitute\/cromwell.*/image: broadinstitute\/cromwell:$CROMWELL_VERSION/" cromwell-helm/templates/cromwell.yaml + sed -i "s|image: broadinstitute/cromwell:.*|image: broadinstitute/cromwell:$CROMWELL_VERSION|" terra-batch-libchart/values.yaml + git diff git config --global user.name "broadbot" git config --global user.email "broadbot@broadinstitute.org" - git commit -am "Auto update to Cromwell $CROMWELL_VERSION" + git commit -am "${{ steps.fetch-jira-id.outputs.JIRA_ID }}: Auto update to Cromwell $CROMWELL_VERSION" git push https://broadbot:$BROADBOT_GITHUB_TOKEN@github.com/broadinstitute/cromwhelm.git main + cd - + + - name: Clone terra-helmfile + uses: actions/checkout@v3 + with: + repository: broadinstitute/terra-helmfile + token: ${{ secrets.BROADBOT_GITHUB_TOKEN }} # Has to be set at checkout AND later when pushing
to work + path: terra-helmfile + + - name: Update workflows-app in terra-helmfile + run: | + set -e + cd terra-helmfile + sed -i "s|image: broadinstitute/cromwell:.*|image: broadinstitute/cromwell:$CROMWELL_VERSION|" charts/workflows-app/values.yaml + cd - + + - name: Update cromwell-runner-app in terra-helmfile + run: | + set -e + cd terra-helmfile + sed -i "s|image: broadinstitute/cromwell:.*|image: broadinstitute/cromwell:$CROMWELL_VERSION|" charts/cromwell-runner-app/values.yaml + cd - + + + - name: Make PR in terra-helmfile + env: + BROADBOT_TOKEN: ${{ secrets.BROADBOT_GITHUB_TOKEN }} + GH_TOKEN: ${{ secrets.BROADBOT_GITHUB_TOKEN }} + run: | + set -e + JIRA_ID=${{ steps.fetch-jira-id.outputs.JIRA_ID }} + if [[ $JIRA_ID == "missing" ]]; then + echo "JIRA_ID missing, PR to terra-helmfile will not be created" + exit 0; + fi + cd terra-helmfile + git checkout -b ${JIRA_ID}-cromwell-update-$CROMWELL_VERSION + git config --global user.name "broadbot" + git config --global user.email "broadbot@broadinstitute.org" + git commit -am "${JIRA_ID}: Auto update Cromwell to $CROMWELL_VERSION in workflows-app and cromwell-runner-app" + git push -u origin ${JIRA_ID}-cromwell-update-$CROMWELL_VERSION + gh pr create --title "${JIRA_ID}: auto update Cromwell version to $CROMWELL_VERSION in workflows-app and cromwell-runner-app" --body "${JIRA_ID} helm chart update" --label "automerge" + cd - diff --git a/.github/workflows/consumer_contract_tests.yml b/.github/workflows/consumer_contract_tests.yml new file mode 100644 index 00000000000..0970e45e863 --- /dev/null +++ b/.github/workflows/consumer_contract_tests.yml @@ -0,0 +1,143 @@ +name: Consumer contract tests +# The purpose of this workflow is to run a suite of Cromwell contract tests against mock service provider(s) using the Pact framework. +# +# More details about Contract Testing can be found in our handbook +# +# https://broadworkbench.atlassian.net/wiki/spaces/IRT/pages/2660368406/Getting+Started+with+Pact+Contract+Testing +# +# This workflow involves Cromwell as a consumer, and ANY provider (e.g. Sam) Cromwell consumes. +# Each party owns a set of tests (aka contract tests). +# +# Consumer contract tests (aka consumer tests) run against a mock provider service and do not require a real provider service. +# Provider contract tests (aka provider verification tests) run independently of any consumer. +# +# Specifically: +# Cromwell runs consumer tests against a mock service. Upon success, it publishes consumer pacts to +# Pact Broker https://pact-broker.dsp-eng-tools.broadinstitute.org/. +# +# Pact Broker is the source of truth to forge contractual obligations between consumer and provider. +# +# This workflow meets the criteria of Pact Broker *Platinum* as described in https://docs.pact.io/pact_nirvana/step_6. +# The can-i-deploy job has been added to this workflow to support *Platinum* and gate the code for promotion to the default branch. +# +# This is how it works. +# +# Consumer makes a change that results in a new pact published to Pact Broker. +# Pact Broker notifies provider(s) of the changed pact and triggers corresponding verification workflows. +# Provider downloads relevant versions of consumer pacts from Pact Broker and kicks off verification tests against the consumer pacts. +# Provider updates Pact Broker with verification status. +# Consumer kicks off the can-i-deploy process to determine if changes can be promoted and used for deployment.
+# +# NOTE: The publish-contracts workflow will use the latest commit of the branch that triggers this workflow to publish the unique consumer contract version to Pact Broker. + +on: + pull_request: + paths-ignore: + - 'README.md' + push: + paths-ignore: + - 'README.md' + merge_group: + branches: + - develop + +jobs: + init-github-context: + runs-on: ubuntu-latest + outputs: + repo-branch: ${{ steps.extract-branch.outputs.repo-branch }} + repo-version: ${{ steps.extract-branch.outputs.repo-version }} + fork: ${{ steps.extract-branch.outputs.fork }} + + steps: + - uses: actions/checkout@v3 + + - name: Extract branch + id: extract-branch + run: | + GITHUB_EVENT_NAME=${{ github.event_name }} + if [[ "$GITHUB_EVENT_NAME" == "push" ]]; then + GITHUB_REF=${{ github.ref }} + GITHUB_SHA=${{ github.sha }} + elif [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then + GITHUB_REF=refs/heads/${{ github.head_ref }} + GITHUB_SHA=${{ github.event.pull_request.head.sha }} + elif [[ "$GITHUB_EVENT_NAME" == "merge_group" ]]; then + GITHUB_REF=refs/heads/${{ github.head_ref }} + else + echo "Failed to extract branch information" + exit 1 + fi + echo "CURRENT_BRANCH=${GITHUB_REF/refs\/heads\//""}" >> $GITHUB_ENV + echo "CURRENT_SHA=$GITHUB_SHA" >> $GITHUB_ENV + + echo "repo-branch=${GITHUB_REF/refs\/heads\//""}" >> $GITHUB_OUTPUT + echo "repo-version=${GITHUB_SHA}" >> $GITHUB_OUTPUT + echo "fork=${FORK}" >> $GITHUB_OUTPUT + + - name: Is PR triggered by forked repo? + if: ${{ steps.extract-branch.outputs.fork == 'true' }} + run: | + echo "PR was triggered by forked repo" + + - name: Echo repo and branch information + run: | + echo "repo-owner=${{ github.repository_owner }}" + echo "repo-name=${{ github.event.repository.name }}" + echo "repo-branch=${{ steps.extract-branch.outputs.repo-branch }}" + echo "repo-version=${{ steps.extract-branch.outputs.repo-version }}" + echo "fork=${{ steps.extract-branch.outputs.fork }}" + + cromwell-consumer-contract-tests: + runs-on: ubuntu-latest + needs: [init-github-context] + outputs: + pact-b64: ${{ steps.encode-pact.outputs.pact-b64 }} + + steps: + - uses: actions/checkout@v3 + - name: Run consumer tests + run: | + docker run --rm -v $PWD:/working \ + -v jar-cache:/root/.ivy \ + -v jar-cache:/root/.ivy2 \ + -w /working \ + sbtscala/scala-sbt:openjdk-17.0.2_1.7.2_2.13.10 \ + sbt "project pact4s" clean test + + - name: Output consumer contract as non-breaking base64 string + id: encode-pact + run: | + cd pact4s + NON_BREAKING_B64=$(cat target/pacts/cromwell-consumer-drshub-provider.json | base64 -w 0) + echo "pact-b64=${NON_BREAKING_B64}" >> $GITHUB_OUTPUT + + # Prevent untrusted sources from using PRs to publish contracts + # since access to secrets is not allowed. 
+ publish-contracts: + runs-on: ubuntu-latest + if: ${{ needs.init-github-context.outputs.fork == 'false' || needs.init-github-context.outputs.fork == ''}} + needs: [init-github-context, cromwell-consumer-contract-tests] + steps: + - name: Dispatch to terra-github-workflows + uses: broadinstitute/workflow-dispatch@v3 + with: + workflow: .github/workflows/publish-contracts.yaml + repo: broadinstitute/terra-github-workflows + ref: refs/heads/main + token: ${{ secrets.BROADBOT_GITHUB_TOKEN }} # github token for access to kick off a job in the private repo + inputs: '{ "pact-b64": "${{ needs.cromwell-consumer-contract-tests.outputs.pact-b64 }}", "repo-owner": "${{ github.repository_owner }}", "repo-name": "${{ github.event.repository.name }}", "repo-branch": "${{ needs.init-github-context.outputs.repo-branch }}" }' + + can-i-deploy: + runs-on: ubuntu-latest + if: ${{ needs.init-github-context.outputs.fork == 'false' || needs.init-github-context.outputs.fork == ''}} + needs: [ init-github-context, publish-contracts ] + steps: + - name: Dispatch to terra-github-workflows + uses: broadinstitute/workflow-dispatch@v3 + with: + workflow: .github/workflows/can-i-deploy.yaml + repo: broadinstitute/terra-github-workflows + ref: refs/heads/main + token: ${{ secrets.BROADBOT_GITHUB_TOKEN }} # github token for access to kick off a job in the private repo + inputs: '{ "pacticipant": "cromwell-consumer", "version": "${{ needs.init-github-context.outputs.repo-version }}" }' diff --git a/.github/workflows/cromwell_unit_tests.yml b/.github/workflows/cromwell_unit_tests.yml index d0927f8b954..88951871d8f 100644 --- a/.github/workflows/cromwell_unit_tests.yml +++ b/.github/workflows/cromwell_unit_tests.yml @@ -9,6 +9,7 @@ run-name: ${{ github.actor }} running Cromwell sbt unit tests. on: workflow_dispatch: #Manual trigger from GitHub UI push: + merge_group: permissions: contents: read @@ -27,6 +28,10 @@ jobs: #Invoke SBT to run all unit tests for Cromwell. - name: Run tests + env: + AZURE_CLIENT_ID: ${{ secrets.VAULT_AZURE_CENTAUR_CLIENT_ID }} + AZURE_CLIENT_SECRET: ${{ secrets.VAULT_AZURE_CENTAUR_CLIENT_SECRET }} + AZURE_TENANT_ID: ${{ secrets.VAULT_AZURE_CENTAUR_TENANT_ID }} run: | set -e sbt "test" diff --git a/.github/workflows/docker_build_test.yml b/.github/workflows/docker_build_test.yml new file mode 100644 index 00000000000..01c2ea502c9 --- /dev/null +++ b/.github/workflows/docker_build_test.yml @@ -0,0 +1,37 @@ +name: 'Docker Build Test' + +# This test verifies that we can successfully build the same docker images that we release. +# Includes `cromwell`, `womtool`, `cromiam`, and `cromwell-drs-localizer` +# See chart_update_on_merge.yml for the actual release workflow. + +run-name: ${{ github.actor }} Docker Build Test + +on: + workflow_dispatch: + push: + merge_group: + +permissions: + contents: read + +jobs: + sbt-build: + name: sbt docker build + runs-on: ubuntu-latest + steps: + - name: Clone Cromwell + uses: actions/checkout@v2 + with: + repository: broadinstitute/cromwell + token: ${{ secrets.BROADBOT_GITHUB_TOKEN }} + path: cromwell + - uses: olafurpg/setup-scala@v10 + with: + java-version: adopt@1.11 + # The following invocation should be as similar as possible to the one in chart_update_on_merge.yml + # To state the obvious: This test should not publish anything. It should simply verify that the build completes. 
+ - name: Build Cromwell Docker + run: | + set -e + cd cromwell + sbt -Dproject.isSnapshot=false -Dproject.isRelease=false docker diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml new file mode 100644 index 00000000000..ebafe51064c --- /dev/null +++ b/.github/workflows/integration_tests.yml @@ -0,0 +1,117 @@ +name: 'Integration Tests' + +#This github action runs all of Cromwell's integration tests. + +# This is what shows up in the github workflows page as the title. Using github ternary syntax & format() function. +run-name: ${{ github.event_name == 'schedule' && 'Nightly Integration Testing' || format('{0} Integration Testing', github.actor) }} + +#What will trigger the workflow to run. +on: + workflow_dispatch: #Manual trigger from GitHub UI + push: + schedule: + - cron: '0 0 * * 1-5' + merge_group: + +permissions: + contents: read + +jobs: + integration-tests: + strategy: + fail-fast: false #disabling fail-fast means that even if one test fails, the others will still try to complete. + #Each entry below is a single integration test that lives in /src/ci/bin/. + #Each will be launched on its own runner so they can occur in parallel. + #Friendly names are displayed on the Github UI and aren't used anywhere else. + matrix: + # Batch test fixes to land later + include: + - build_type: centaurGcpBatch + build_mysql: 5.7 + friendly_name: Centaur GCP Batch with MySQL 5.7 + - build_type: centaurPapiV2beta + build_mysql: 5.7 + friendly_name: Centaur Papi V2 Beta with MySQL 5.7 + - build_type: dbms + friendly_name: DBMS + - build_type: centaurTes + build_mysql: 5.7 + friendly_name: Centaur TES with MySQL 5.7 + - build_type: centaurLocal + build_mysql: 5.7 + friendly_name: Centaur Local with MySQL 5.7 + - build_type: checkPublish + friendly_name: Check Publish + - build_type: centaurAws + build_mysql: 5.7 + friendly_name: Centaur AWS with MySQL 5.7 + - build_type: centaurDummy + build_mysql: 5.7 + friendly_name: Centaur Dummy with MySQL 5.7 + - build_type: centaurHoricromtalPapiV2beta + build_mysql: 5.7 + friendly_name: Centaur Horicromtal PapiV2 Beta with MySQL 5.7 + - build_type: horicromtalDeadlock + friendly_name: Horicromtal Deadlock + - build_type: singleWorkflowRunner + friendly_name: Single Workflow Runner + - build_type: centaurLocal + build_mariadb: 10.3 + friendly_name: Centaur Local with MariaDB 10.3 + - build_type: centaurLocal + build_postgresql: 11.3 + friendly_name: Centaur Local with PostgreSQL 11.3 + - build_type: centaurEngineUpgradeLocal + build_mysql: 5.7 + friendly_name: Centaur Engine Upgrade Local with MySQL 5.7 + - build_type: referenceDiskManifestBuilderApp + friendly_name: Reference Disk Manifest Builder App + - build_type: centaurSlurm + build_mysql: 5.7 + friendly_name: "Centaur Slurm with MySQL 5.7" + - build_type: centaurBlob + build_mysql: 5.7 + friendly_name: Centaur Blob + name: ${{ matrix.friendly_name }} + env: + BUILD_NAME: ${{ matrix.build_type }} + BUILD_TYPE: ${{ matrix.build_type }} #intentionally duplicated variable + BUILD_MYSQL: ${{ matrix.build_mysql }} + BUILD_POSTGRESQL: ${{ matrix.build_postgresql }} + BUILD_MARIADB: ${{ matrix.build_mariadb }} + VAULT_ROLE_ID: ${{ secrets.VAULT_ROLE_ID_CI }} + VAULT_SECRET_ID: ${{ secrets.VAULT_SECRET_ID_CI }} + AZURE_CLIENT_ID: ${{ secrets.VAULT_AZURE_CENTAUR_CLIENT_ID }} + AZURE_CLIENT_SECRET: ${{ secrets.VAULT_AZURE_CENTAUR_CLIENT_SECRET }} + AZURE_TENANT_ID: ${{ secrets.VAULT_AZURE_CENTAUR_TENANT_ID }} + runs-on: ubuntu-latest + timeout-minutes: 120 + steps: + - 
uses: actions/checkout@v3 # checkout the cromwell repo + with: + ref: ${{ inputs.target-branch }} + - uses: ./.github/set_up_cromwell_action #This github action will set up git-secrets, caching, java, and sbt. + with: + cromwell_repo_token: ${{ secrets.BROADBOT_GITHUB_TOKEN }} + #This script basically just looks up another script to run, assuming that the other script's filename is: + #src/ci/bin/test${BUILD_TYPE}.sh. The first letter of the BUILD_TYPE is automatically capitalized when looking. + - name: Run Integration Test + shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' #See comment below + run: | + set -e + echo Running test.sh + ./src/ci/bin/test.sh + # always() is some github magic that forces the following step to run, even when the previous fails. + # Without it, the if statement won't be evaluated on a test failure. + - uses: ravsamhq/notify-slack-action@v2 + if: always() && github.ref == 'refs/heads/develop' #only report on failures against develop. + with: + status: ${{ job.status }} + notify_when: "failure" + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + #The "shell: ..." line is a way to force the Github Action Runner to use a bash shell that thinks it has a TTY. + #The issue and solution are described here: https://github.com/actions/runner/issues/241#issuecomment-842566950 + #This is only needed for the ReferenceDiskManifestBuilderApp test. + #This test uses fancy colors in the output, which likely causes the problem. + #See WX-938. diff --git a/.github/workflows/make_publish_prs.yml b/.github/workflows/make_publish_prs.yml index ced80154c80..e4e98a7f2f0 100644 --- a/.github/workflows/make_publish_prs.yml +++ b/.github/workflows/make_publish_prs.yml @@ -16,7 +16,7 @@ on: jobs: make-firecloud-develop-pr: name: Create firecloud-develop PR - runs-on: self-hosted # Faster machines; see https://github.com/broadinstitute/cromwell/settings/actions/runners + runs-on: ubuntu-latest steps: - name: Clone firecloud-develop uses: actions/checkout@v2 @@ -50,7 +50,7 @@ jobs: git config --global user.email "broadbot@broadinstitute.org" git commit -m "Updating Cromwell version to ${NEW_CROMWELL_V}" git push https://broadbot:$BROADBOT_GITHUB_TOKEN@github.com/broadinstitute/firecloud-develop.git ${NEW_BRANCH_NAME} - echo ::set-output name=NEW_BRANCH_NAME::${NEW_BRANCH_NAME} + echo "NEW_BRANCH_NAME=${NEW_BRANCH_NAME}" >> $GITHUB_OUTPUT - name: Create firecloud-develop PR uses: actions/github-script@v6 with: @@ -70,4 +70,3 @@ jobs: 'It updates cromwell from version ${{ github.event.inputs.old_cromwell_version }} to ${{ github.event.inputs.new_cromwell_version }}.'
].join('\n') }); - diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 0590d48ef53..b005da65041 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -46,7 +46,7 @@ jobs: # export image name from the log image=$(grep 'Tagging image' build.log | awk '{print $NF}') - echo "::set-output name=image::${image}" + echo "image=${image}" >> $GITHUB_OUTPUT # scan the image - uses: broadinstitute/dsp-appsec-trivy-action@v1 diff --git a/.github/workflows/validate_pr_name.yml b/.github/workflows/validate_pr_name.yml new file mode 100644 index 00000000000..db26bbd95c6 --- /dev/null +++ b/.github/workflows/validate_pr_name.yml @@ -0,0 +1,23 @@ +# A github action to validate the name of a pull request contains a Jira tag: + +name: Validate PR name + +on: + pull_request: + types: [opened, edited, synchronize] + +jobs: + validate_pr_name: + runs-on: ubuntu-latest + steps: + - name: Validate PR title + id: validate + uses: actions/github-script@v3 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const title = context.payload.pull_request.title; + const regex = /[A-Z][A-Z]+-\d+/; + if (!regex.test(title)) { + core.setFailed("PR title must contain a Jira tag"); + } diff --git a/.gitignore b/.gitignore index 571a12c5873..a5b72f6b263 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,12 @@ tags target /site +#from running integration tests locally +actual.json +console_output.txt +expected.json +run_mode_metadata.json + # custom config cromwell-executions cromwell-test-executions @@ -38,7 +44,6 @@ cromwell-service-account.json cwl_conformance_test.inputs.json dockerhub_provider_config_v1.inc.conf dockerhub_provider_config_v2.inc.conf -github_private_deploy_key papi_application.inc.conf papi_refresh_token.options.json papi_v2_gcsa.options.json diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 9ffb48b1147..00000000000 --- a/.travis.yml +++ /dev/null @@ -1,88 +0,0 @@ -os: linux -dist: focal -services: - - docker -language: minimal -git: - depth: false -cache: - directories: - - $HOME/.ivy2/cache - - $HOME/.coursier/cache - # see cromwell::private::delete_sbt_boot for more info - #- $HOME/.sbt/boot/ -before_cache: - # Tricks to avoid unnecessary cache updates - - find $HOME/.ivy2/cache -name "ivydata-*.properties" -print -delete - - find $HOME/.coursier/cache -name "ivydata-*.properties" -print -delete - - find $HOME/.sbt -name "*.lock" -print -delete -env: - jobs: - # Setting this variable twice will cause the 'script' section to run twice with the respective env var invoked - - >- - BUILD_TYPE=centaurAws - BUILD_MYSQL=5.7 - - >- - BUILD_TYPE=centaurDummy - BUILD_MYSQL=5.7 - - >- - BUILD_TYPE=centaurEngineUpgradeLocal - BUILD_MYSQL=5.7 - # Temporarily keeping until `centaurEngineUpgradePapiV2beta` or similar exists - - >- - BUILD_TYPE=centaurEngineUpgradePapiV2alpha1 - BUILD_MYSQL=5.7 - - >- - BUILD_TYPE=centaurHoricromtalPapiV2beta - BUILD_MYSQL=5.7 - - >- - BUILD_TYPE=centaurLocal - BUILD_MARIADB=10.3 - - >- - BUILD_TYPE=centaurLocal - BUILD_MYSQL=5.7 - - >- - BUILD_TYPE=centaurLocal - BUILD_POSTGRESQL=11.3 - - >- - BUILD_TYPE=centaurPapiV2beta - BUILD_MYSQL=5.7 - - >- - BUILD_TYPE=centaurSlurm - BUILD_MYSQL=5.7 - - >- - BUILD_TYPE=centaurTes - BUILD_MYSQL=5.7 - - >- - BUILD_TYPE=checkPublish - - >- - BUILD_TYPE=horicromtalDeadlock - - >- - BUILD_TYPE=sbt - BUILD_SBT_INCLUDE=engine - - >- - BUILD_TYPE=sbt - BUILD_SBT_INCLUDE=server - - >- - BUILD_TYPE=sbt - BUILD_SBT_INCLUDE=services - - >- - BUILD_TYPE=sbt - 
BUILD_SBT_EXCLUDE='engine|server|services' - - >- - BUILD_TYPE=dbms - - >- - BUILD_TYPE=singleWorkflowRunner - - >- - BUILD_TYPE=referenceDiskManifestBuilderApp -script: - - src/ci/bin/test.sh -notifications: - slack: - rooms: - - secure: B5KYcnhk/ujAUWlHsjzP7ROLm6MtYhaGikdYf6JYINovhMbVKnZCTlZEy7rqT3L2T5uJ25iefD500VQGk1Gn7puQ1sNq50wqjzQaj20PWEiBwoWalcV/nKBcQx1TyFT13LJv8fbFnVPxFCkC3YXoHedx8qAhDs8GH/tT5J8XOC8= - template: - - "Build <%{build_url}|#%{build_number}> (<%{compare_url}|%{commit}>) of %{repository}@%{branch} by %{author} %{result} in %{duration}" - on_success: change - on_failure: change - on_pull_requests: false diff --git a/CHANGELOG.md b/CHANGELOG.md index 0084c05171b..a581852c02e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Cromwell Change Log +## 86 Release Notes + +### GCP Batch +Cromwell now supports the GCP Batch backend for running workflows. See `Backend` in [ReadTheDocs](https://cromwell.readthedocs.io/en/stable/) for more information. + +### Workflow Completion Callback +Cromwell can be configured to send a POST request to a specified URL when a workflow completes. The request body includes the workflow ID, terminal state, +and (if applicable) final outputs or error message. See `WorkflowCallback` in [ReadTheDocs](https://cromwell.readthedocs.io/en/stable/) for more information. + +### Other Improvements +* Cromwell will now parallelize the downloads of DRS files that resolve to signed URLs. This significantly reduces the time localization takes in certain situations. +* The WDL `size` engine function now works for HTTP files +* Improved Cromwell's handling of docker manifests. Additional logging information is emitted, and Cromwell will fall back to using OCI manifests if it encounters an error with a Docker Image Manifest V2. ## 85 Release Notes ### Migration of PKs to BIGINT diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000000..34ece8d7792 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,5 @@ +# These owners will be the default owners for everything in +# the repo. Unless a later match takes precedence, +# @broadinstitute/dsp-batch will be requested for +# review when someone opens a pull request. +* @broadinstitute/dsp-batch diff --git a/README.md b/README.md index fac1541cf8e..f6e3f8e742e 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,3 @@ -[![Build Status](https://travis-ci.com/broadinstitute/cromwell.svg?branch=develop)](https://travis-ci.com/broadinstitute/cromwell?branch=develop) [![codecov](https://codecov.io/gh/broadinstitute/cromwell/branch/develop/graph/badge.svg)](https://codecov.io/gh/broadinstitute/cromwell) ## Welcome to Cromwell diff --git a/azure-blob-nio/README.md b/azure-blob-nio/README.md new file mode 100644 index 00000000000..ad6c553eabf --- /dev/null +++ b/azure-blob-nio/README.md @@ -0,0 +1,5 @@ +# Azure Storage Blob NIO FileSystemProvider + +[This is a copy of the NIO Filesystem implementation version 12.0.0-beta.19](https://github.com/Azure/azure-sdk-for-java/tree/2490e1e19e8531fe0a6378f40e299e7ec64cf3aa/sdk/storage/azure-storage-blob-nio) + +For more information on the initial design and commit history, see the Azure SDK repository linked above. Changes to this copy were necessary to support the specific needs that Cromwell as an App on Azure has as a system in Terra. There is precedent for this approach: it has been done for other filesystems in the past.
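For orientation, this is how a Java NIO provider like the one vendored here is typically mounted and used. The config keys below mirror the `AzureFileSystem` constants that appear later in this diff, and `azb` is this provider's URI scheme; the endpoint syntax, account, and container names are illustrative assumptions rather than values taken from this change. A minimal sketch:

```java
import java.net.URI;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;

public class AzureNioSketch {
    public static void main(String[] args) throws Exception {
        Map<String, Object> config = new HashMap<>();
        // Keys are the AzureFileSystem constants defined in this copy; values are placeholders.
        config.put("AzureStorageSasTokenCredential", System.getenv("AZURE_SAS_TOKEN"));
        config.put("AzureStorageFileStores", "my-container"); // hypothetical container name

        // The "azb" scheme routes to this provider; the endpoint query syntax is an assumption.
        try (FileSystem fs = FileSystems.newFileSystem(
                URI.create("azb://?endpoint=https://myaccount.blob.core.windows.net"), config)) {
            // The root component is the file store (container) name followed by ':'.
            Path blob = fs.getPath("my-container:/dir/hello.txt");
            Files.createDirectories(blob.getParent());
            Files.write(blob, "hello".getBytes());
        }
    }
}
```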
diff --git a/azure-blob-nio/assets.json b/azure-blob-nio/assets.json new file mode 100644 index 00000000000..c262f7ebafc --- /dev/null +++ b/azure-blob-nio/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "java", + "TagPrefix": "java/storage/azure-storage-blob-nio", + "Tag": "java/storage/azure-storage-blob-nio_b2a0ce219e" +} diff --git a/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributeView.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributeView.java new file mode 100644 index 00000000000..43744893ccb --- /dev/null +++ b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributeView.java @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob.nio; + +import com.azure.core.util.logging.ClientLogger; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.attribute.BasicFileAttributeView; +import java.nio.file.attribute.FileTime; + +/** + * Provides support for basic file attributes. + *
+ * The operations supported by this view and the attributes it reads are a strict subset of + * {@link AzureBlobFileAttributeView} and have the same network behavior. Therefore, while this type is offered for + * compliance with the NIO spec, {@link AzureBlobFileAttributeView} is generally preferred. + *
+ * {@link #setTimes(FileTime, FileTime, FileTime)} is not supported. + */ +public final class AzureBasicFileAttributeView implements BasicFileAttributeView { + private static final ClientLogger LOGGER = new ClientLogger(AzureBasicFileAttributeView.class); + + static final String NAME = "azureBasic"; + + private final Path path; + + AzureBasicFileAttributeView(Path path) { + this.path = path; + } + + /** + * Returns the name of the attribute view: {@code "azureBasic"} + * + * @return the name of the attribute view: {@code "azureBasic"} + */ + @Override + public String name() { + return NAME; + } + + /** + * Reads the basic file attributes as a bulk operation. + *
+ * All file attributes are read as an atomic operation with respect to other file system operations. + * + * @return {@link AzureBasicFileAttributes} + */ + @Override + public AzureBasicFileAttributes readAttributes() throws IOException { + AzurePath.ensureFileSystemOpen(path); + return new AzureBasicFileAttributes(path); + } + + /** + * Unsupported. + * + * @param lastModifiedTime the new last modified time, or null to not change the value + * @param lastAccessTime the last access time, or null to not change the value + * @param createTime the file's create time, or null to not change the value + * @throws UnsupportedOperationException Operation not supported. + * @throws IOException never + */ + @Override + public void setTimes(FileTime lastModifiedTime, FileTime lastAccessTime, FileTime createTime) throws IOException { + throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException()); + } +} diff --git a/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributes.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributes.java new file mode 100644 index 00000000000..d1ab6d28562 --- /dev/null +++ b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributes.java @@ -0,0 +1,165 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob.nio; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.FileAttribute; +import java.nio.file.attribute.FileTime; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +/** + * Provides support for basic file attributes. + *
+ * The properties available on this type are a strict subset of {@link AzureBlobFileAttributes}, and the two types have + * the same network behavior. Therefore, while this type is offered for compliance with the NIO spec, + * {@link AzureBlobFileAttributes} is generally preferred. + *
+ * Some attributes are not supported. Refer to the javadocs on each method for more information. + *
+ * If the target file is a virtual directory, most attributes will be set to null.
+ */
+public final class AzureBasicFileAttributes implements BasicFileAttributes {
+ // For verifying parameters on FileSystemProvider.readAttributes
+ static final Set<String> ATTRIBUTE_STRINGS;
+ * Last access time is not supported by the blob service. In this case, it is typical for implementations to return
+ * the {@link #lastModifiedTime()}.
+ *
+ * @return the time of last modification or null if this is a virtual directory
+ */
+ @Override
+ public FileTime lastAccessTime() {
+ return this.internalAttributes.lastAccessTime();
+ }
+
+ /**
+ * Returns the creation time. The creation time is the time that the file was created. Returns null if this is a
+ * virtual directory.
+ *
+ * @return The creation time or null if this is a virtual directory
+ */
+ @Override
+ public FileTime creationTime() {
+ return this.internalAttributes.creationTime();
+ }
+
+ /**
+ * Tells whether the file is a regular file with opaque content.
+ *
+ * @return whether the file is a regular file.
+ */
+ @Override
+ public boolean isRegularFile() {
+ return this.internalAttributes.isRegularFile();
+ }
+
+ /**
+ * Tells whether the file is a directory.
+ *
+ * Will only return true if the directory is a concrete directory. See
+ * {@link AzureFileSystemProvider#createDirectory(Path, FileAttribute[])} for more information on virtual and
+ * concrete directories.
+ *
+ * @return whether the file is a directory
+ */
+ @Override
+ public boolean isDirectory() {
+ return this.internalAttributes.isDirectory();
+ }
+
+ /**
+ * Tells whether the file is a virtual directory.
+ *
+ * See {@link AzureFileSystemProvider#createDirectory(Path, FileAttribute[])} for more information on virtual and
+ * concrete directories.
+ *
+ * @return whether the file is a virtual directory
+ */
+ public boolean isVirtualDirectory() {
+ return this.internalAttributes.isVirtualDirectory();
+ }
+
+ /**
+ * Tells whether the file is a symbolic link.
+ *
+ * @return false. Symbolic links are not supported.
+ */
+ @Override
+ public boolean isSymbolicLink() {
+ return this.internalAttributes.isSymbolicLink();
+ }
+
+ /**
+ * Tells whether the file is something other than a regular file, directory, or symbolic link.
+ *
+ * @return false. No other object types are supported.
+ */
+ @Override
+ public boolean isOther() {
+ return this.internalAttributes.isOther();
+ }
+
+ /**
+ * Returns the size of the file (in bytes).
+ *
+ * @return the size of the file
+ */
+ @Override
+ public long size() {
+ return this.internalAttributes.size();
+ }
+
+ /**
+ * Returns the url of the resource.
+ *
+ * @return The file key, which is the url.
+ */
+ @Override
+ public Object fileKey() {
+ return this.internalAttributes.fileKey();
+ }
+}
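Since this attributes class is paired with the view registered under the name "azureBasic" (the NAME constant above), callers can read it either by class or by string view name. A minimal usage sketch, assuming `path` was obtained from an open `AzureFileSystem`:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;
import com.azure.storage.blob.nio.AzureBasicFileAttributes;

class ReadBasicAttributesSketch {
    static void describe(Path path) throws IOException {
        // Typed access to the attributes defined above.
        AzureBasicFileAttributes attrs = Files.readAttributes(path, AzureBasicFileAttributes.class);
        System.out.printf("size=%d dir=%b virtual=%b%n",
            attrs.size(), attrs.isDirectory(), attrs.isVirtualDirectory());

        // String-based access; the "azureBasic" prefix selects this view.
        Map<String, Object> raw = Files.readAttributes(path, "azureBasic:size,isDirectory");
        System.out.println(raw);
    }
}
```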
diff --git a/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributeView.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributeView.java
new file mode 100644
index 00000000000..d9366e22417
--- /dev/null
+++ b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributeView.java
@@ -0,0 +1,157 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob.nio;
+
+import com.azure.core.util.logging.ClientLogger;
+import com.azure.storage.blob.models.AccessTier;
+import com.azure.storage.blob.models.BlobHttpHeaders;
+import com.azure.storage.blob.models.BlobStorageException;
+import com.azure.storage.blob.specialized.BlobClientBase;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.file.Path;
+import java.nio.file.attribute.BasicFileAttributeView;
+import java.nio.file.attribute.FileTime;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Consumer;
+
+/**
+ * A file attribute view that provides a view of attributes specific to files stored as blobs in Azure Storage.
+ *
+ * All attributes are retrieved from the file system as a bulk operation.
+ *
+ * {@link #setTimes(FileTime, FileTime, FileTime)} is not supported.
+ */
+public final class AzureBlobFileAttributeView implements BasicFileAttributeView {
+ private static final ClientLogger LOGGER = new ClientLogger(AzureBlobFileAttributeView.class);
+
+ static final String ATTR_CONSUMER_ERROR = "Exception thrown by attribute consumer";
+ static final String NAME = "azureBlob";
+
+ private final Path path;
+
+ AzureBlobFileAttributeView(Path path) {
+ this.path = path;
+ }
+
+ @SuppressWarnings("unchecked")
+ static Map<String, Consumer<?>> setAttributeConsumers(AzureBlobFileAttributeView view) {
+ * All file attributes are read as an atomic operation with respect to other file system operations. A fresh copy is
+ * retrieved every time this method is called.
+ * @return {@link AzureBlobFileAttributes}
+ * @throws IOException if an IOException occurs.
+ */
+ @Override
+ public AzureBlobFileAttributes readAttributes() throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ return new AzureBlobFileAttributes(path);
+ }
+
+ /**
+ * Sets the {@link BlobHttpHeaders} as an atomic operation.
+ *
+ * See {@link BlobClientBase#setHttpHeaders(BlobHttpHeaders)} for more information.
+ * @param headers {@link BlobHttpHeaders}
+ * @throws IOException if an IOException occurs.
+ */
+ public void setBlobHttpHeaders(BlobHttpHeaders headers) throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ try {
+ new AzureResource(this.path).getBlobClient().setHttpHeaders(headers);
+ } catch (BlobStorageException e) {
+ throw LoggingUtility.logError(LOGGER, new IOException(e));
+ }
+ }
+
+ /**
+ * Sets the metadata as an atomic operation.
+ *
+ * See {@link BlobClientBase#setMetadata(Map)} for more information.
+ * @param metadata The metadata to associate with the blob
+ * @throws IOException if an IOException occurs.
+ */
+ public void setMetadata(Map<String, String> metadata) throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ try {
+ new AzureResource(this.path).getBlobClient().setMetadata(metadata);
+ } catch (BlobStorageException e) {
+ throw LoggingUtility.logError(LOGGER, new IOException(e));
+ }
+ }
+
+ /**
+ * Sets the {@link AccessTier} on the file.
+ *
+ * See {@link BlobClientBase#setAccessTier(AccessTier)} for more information.
+ * @param tier {@link AccessTier}
+ * @throws IOException if an IOException occurs.
+ */
+ public void setTier(AccessTier tier) throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ try {
+ new AzureResource(this.path).getBlobClient().setAccessTier(tier);
+ } catch (BlobStorageException e) {
+ throw LoggingUtility.logError(LOGGER, new IOException(e));
+ }
+ }
+
+ /**
+ * Unsupported.
+ *
+ * @param lastModifiedTime the new last modified time, or null to not change the value
+ * @param lastAccessTime the last access time, or null to not change the value
+ * @param createTime the file's create time, or null to not change the value
+ * @throws UnsupportedOperationException Operation not supported.
+ * @throws IOException never
+ */
+ @Override
+ public void setTimes(FileTime lastModifiedTime, FileTime lastAccessTime, FileTime createTime) throws IOException {
+ throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException());
+ }
+}
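The three setters above are the only mutations this view exposes, and each is its own atomic service call. A short usage sketch, assuming `path` belongs to an open `AzureFileSystem`; the metadata key and value are hypothetical:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collections;
import com.azure.storage.blob.models.AccessTier;
import com.azure.storage.blob.models.BlobHttpHeaders;
import com.azure.storage.blob.nio.AzureBlobFileAttributeView;

class BlobViewSketch {
    static void tag(Path path) throws IOException {
        AzureBlobFileAttributeView view =
            Files.getFileAttributeView(path, AzureBlobFileAttributeView.class);
        // Each call below is a separate atomic round trip, per the javadocs above.
        view.setMetadata(Collections.singletonMap("origin", "cromwell")); // hypothetical key/value
        view.setTier(AccessTier.COOL);
        view.setBlobHttpHeaders(new BlobHttpHeaders().setContentType("text/plain"));
    }
}
```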
diff --git a/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributes.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributes.java
new file mode 100644
index 00000000000..c73d062e117
--- /dev/null
+++ b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributes.java
@@ -0,0 +1,369 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob.nio;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.FileAttribute;
+import java.nio.file.attribute.FileTime;
+import java.time.OffsetDateTime;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Supplier;
+
+import com.azure.core.util.logging.ClientLogger;
+import com.azure.storage.blob.models.AccessTier;
+import com.azure.storage.blob.models.ArchiveStatus;
+import com.azure.storage.blob.models.BlobHttpHeaders;
+import com.azure.storage.blob.models.BlobProperties;
+import com.azure.storage.blob.models.BlobStorageException;
+import com.azure.storage.blob.models.BlobType;
+import com.azure.storage.blob.models.CopyStatusType;
+
+/**
+ * Provides support for attributes associated with a file stored as a blob in Azure Storage.
+ *
+ * Some of the attributes inherited from {@link BasicFileAttributes} are not supported. See the docs on each method for
+ * more information.
+ *
+ * If the target file is a virtual directory, most attributes will be set to null.
+ */
+public final class AzureBlobFileAttributes implements BasicFileAttributes {
+ /*
+ Some blob properties do not have getters as they do not make sense in the context of nio. These properties are:
+ - incremental snapshot related properties (only for page blobs)
+ - lease related properties (leases not currently supported)
+ - sequence number (only for page blobs)
+ - encryption key sha256 (cpk not supported)
+ - committed block count (only for append blobs)
+ */
+
+ private static final ClientLogger LOGGER = new ClientLogger(AzureBlobFileAttributes.class);
+
+ private final BlobProperties properties;
+ private final AzureResource resource;
+ private final boolean isVirtualDirectory;
+
+ AzureBlobFileAttributes(Path path) throws IOException {
+ this.resource = new AzureResource(path);
+ BlobProperties props = null;
+ try {
+ props = resource.getBlobClient().getProperties();
+ } catch (BlobStorageException e) {
+ if (e.getStatusCode() == 404 && this.resource.checkVirtualDirectoryExists()) {
+ this.isVirtualDirectory = true;
+ this.properties = null;
+ return;
+ } else {
+ throw LoggingUtility.logError(LOGGER, new IOException("Path: " + path.toString(), e));
+ }
+ }
+ this.properties = props;
+ this.isVirtualDirectory = false;
+ }
+
+ static Map<String, Supplier<Object>> getAttributeSuppliers(AzureBlobFileAttributes attributes) {
+ * Last access time is not supported by the blob service. In this case, it is typical for implementations to return
+ * the {@link #lastModifiedTime()}.
+ *
+ * @return the time of last modification or null if this is a virtual directory
+ */
+ @Override
+ public FileTime lastAccessTime() {
+ return !this.isVirtualDirectory ? FileTime.from(this.properties.getLastAccessedTime().toInstant()) : null;
+ }
+
+ /**
+ * Tells whether the file is a regular file with opaque content.
+ *
+ * @return whether the file is a regular file.
+ */
+ @Override
+ public boolean isRegularFile() {
+ return !this.isVirtualDirectory
+ && !this.properties.getMetadata().getOrDefault(AzureResource.DIR_METADATA_MARKER, "false").equals("true");
+ }
+
+ /**
+ * Tells whether the file is a directory.
+ *
+ * Will return true if the directory is a concrete or virtual directory. See
+ * {@link AzureFileSystemProvider#createDirectory(Path, FileAttribute[])} for more information on virtual and
+ * concrete directories.
+ *
+ * @return whether the file is a directory
+ */
+ @Override
+ public boolean isDirectory() {
+ return !this.isRegularFile();
+ }
+
+ /**
+ * Tells whether the file is a virtual directory.
+ *
+ * See {@link AzureFileSystemProvider#createDirectory(Path, FileAttribute[])} for more information on virtual and
+ * concrete directories.
+ *
+ * @return whether the file is a virtual directory
+ */
+ public boolean isVirtualDirectory() {
+ return this.isVirtualDirectory;
+ }
+
+ /**
+ * Tells whether the file is a symbolic link.
+ *
+ * @return false. Symbolic links are not supported.
+ */
+ @Override
+ public boolean isSymbolicLink() {
+ return false;
+ }
+
+ /**
+ * Tells whether the file is something other than a regular file, directory, or symbolic link.
+ *
+ * @return false. No other object types are supported.
+ */
+ @Override
+ public boolean isOther() {
+ return false;
+ }
+
+ /**
+ * Returns the size of the file (in bytes).
+ *
+ * @return the size of the file
+ */
+ @Override
+ public long size() {
+ return !this.isVirtualDirectory ? properties.getBlobSize() : 0;
+ }
+
+ /**
+ * Returns the url of the resource.
+ *
+ * @return The file key, which is the url.
+ */
+ @Override
+ public Object fileKey() {
+ return resource.getBlobClient().getBlobUrl();
+ }
+}
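The virtual-directory handling above is the subtle part: isDirectory() is true for both concrete and virtual directories, while most other attributes return null (or 0 for size()) in the virtual case. A sketch of how a caller might branch on it, with `path` again assumed to come from an open `AzureFileSystem`:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import com.azure.storage.blob.nio.AzureBlobFileAttributes;

class VirtualDirectorySketch {
    static void inspect(Path path) throws IOException {
        AzureBlobFileAttributes attrs = Files.readAttributes(path, AzureBlobFileAttributes.class);
        if (attrs.isVirtualDirectory()) {
            // Implied only by blobs beneath the prefix; no backing blob, so no properties to read.
            System.out.println("virtual directory: " + attrs.fileKey());
        } else if (attrs.isDirectory()) {
            System.out.println("concrete directory (marker blob with directory metadata)");
        } else {
            System.out.println("regular blob, " + attrs.size() + " bytes");
        }
    }
}
```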
diff --git a/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureDirectoryStream.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureDirectoryStream.java
new file mode 100644
index 00000000000..817121e958e
--- /dev/null
+++ b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureDirectoryStream.java
@@ -0,0 +1,189 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob.nio;
+
+import java.io.IOException;
+import java.nio.file.DirectoryIteratorException;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Path;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Set;
+
+import com.azure.core.util.logging.ClientLogger;
+import com.azure.storage.blob.BlobContainerClient;
+import com.azure.storage.blob.models.BlobItem;
+import com.azure.storage.blob.models.BlobListDetails;
+import com.azure.storage.blob.models.ListBlobsOptions;
+
+/**
+ * A type for iterating over the contents of a directory.
+ *
+ * This type is asynchronously closeable, i.e. closing the stream from any thread will cause the stream to stop
+ * returning elements at that point.
+ *
+ * {@inheritDoc}
+ */
+public final class AzureDirectoryStream implements DirectoryStream<Path> {
+ * It may be the case that the authentication method provided to this file system only
+ * supports read operations and hence the file store is implicitly read only in this view, but that does not
+ * imply the underlying container/file store is inherently read only. Creating/specifying read only file stores
+ * is not currently supported.
+ *
+ * @return false.
+ */
+ @Override
+ public boolean isReadOnly() {
+ return false;
+ }
+
+ /**
+ * Returns the size, in bytes, of the file store.
+ *
+ * Containers do not limit the amount of data stored. This method will always return max long.
+ *
+ * @return the size of the file store.
+ * @throws IOException If an I/O error occurs.
+ */
+ @Override
+ public long getTotalSpace() throws IOException {
+ return Long.MAX_VALUE;
+ }
+
+ /**
+ * Returns the number of bytes available to this Java virtual machine on the file store.
+ *
+ * Containers do not limit the amount of data stored. This method will always return max long.
+ *
+ * @return the number of bytes available on the file store.
+ * @throws IOException If an I/O error occurs.
+ */
+ @Override
+ public long getUsableSpace() throws IOException {
+ return Long.MAX_VALUE;
+ }
+
+ /**
+ * Returns the number of unallocated bytes in the file store.
+ *
+ * Containers do not limit the amount of data stored. This method will always return max long.
+ *
+ * @return the number of unallocated bytes in the file store.
+ * @throws IOException If an I/O error occurs.
+ */
+ @Override
+ public long getUnallocatedSpace() throws IOException {
+ return Long.MAX_VALUE;
+ }
+
+ /**
+ * Tells whether this file store supports the file attributes identified by the given file attribute view.
+ *
+ * All file stores in this file system support the following views:
+ *
+ * All file stores in this file system support the following views:
+ *
+ * This method always returns null as no {@link FileStoreAttributeView} is currently supported.
+ *
+ * @param aClass a class
+ * @return null
+ */
+ @Override
+ public <V extends FileStoreAttributeView> V getFileStoreAttributeView(Class<V> aClass) {
+ return null;
+ }
+
+ /**
+ * This method always throws an {@code UnsupportedOperationException} as no {@link FileStoreAttributeView} is
+ * currently supported.
+ *
+ * @param s a string
+ * @return The attribute value.
+ * @throws UnsupportedOperationException unsupported
+ * @throws IOException never
+ */
+ @Override
+ public Object getAttribute(String s) throws IOException {
+ throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException("FileStoreAttributeViews aren't"
+ + " supported."));
+ }
+
+ BlobContainerClient getContainerClient() {
+ return this.containerClient;
+ }
+}
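Given the fixed semantics above (never read-only, all space queries pinned to Long.MAX_VALUE, no FileStoreAttributeView support), a file store is mostly useful for its name and for checking attribute-view support. A minimal sketch over an open `AzureFileSystem` `fs`:

```java
import java.io.IOException;
import java.nio.file.FileStore;
import java.nio.file.FileSystem;
import java.nio.file.attribute.BasicFileAttributeView;

class FileStoreSketch {
    static void list(FileSystem fs) throws IOException {
        for (FileStore store : fs.getFileStores()) { // one store per configured container
            System.out.printf("%s readOnly=%b total=%d basic=%b%n",
                store.name(),
                store.isReadOnly(),      // always false for this implementation
                store.getTotalSpace(),   // always Long.MAX_VALUE
                store.supportsFileAttributeView(BasicFileAttributeView.class));
        }
    }
}
```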
diff --git a/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystem.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystem.java
new file mode 100644
index 00000000000..8ca4361bd3e
--- /dev/null
+++ b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystem.java
@@ -0,0 +1,534 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob.nio;
+
+import java.io.IOException;
+import java.nio.file.FileStore;
+import java.nio.file.FileSystem;
+import java.nio.file.InvalidPathException;
+import java.nio.file.Path;
+import java.nio.file.PathMatcher;
+import java.nio.file.WatchService;
+import java.nio.file.attribute.BasicFileAttributeView;
+import java.nio.file.attribute.FileAttributeView;
+import java.nio.file.attribute.UserPrincipalLookupService;
+import java.nio.file.spi.FileSystemProvider;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.regex.PatternSyntaxException;
+
+import com.azure.core.credential.AzureSasCredential;
+import com.azure.core.http.HttpClient;
+import com.azure.core.http.policy.HttpLogDetailLevel;
+import com.azure.core.http.policy.HttpPipelinePolicy;
+import com.azure.core.util.CoreUtils;
+import com.azure.core.util.logging.ClientLogger;
+import com.azure.storage.blob.BlobServiceClient;
+import com.azure.storage.blob.BlobServiceClientBuilder;
+import com.azure.storage.blob.implementation.util.BlobUserAgentModificationPolicy;
+import com.azure.storage.common.StorageSharedKeyCredential;
+import com.azure.storage.common.policy.RequestRetryOptions;
+import com.azure.storage.common.policy.RetryPolicyType;
+
+/**
+ * Implements Java's {@link FileSystem} interface for Azure Blob Storage.
+ *
+ * The following behavior is specific to this FileSystem:
+ *
+ * In the hierarchy of this file system, an {@code AzureFileSystem} corresponds to an Azure Blob Storage account. A
+ * file store is represented by a container in the storage account. Each container has one root directory.
+ *
+ * Closing the file system will not block on outstanding operations. Any operations in progress will be allowed to
+ * terminate naturally after the file system is closed, though no further operations may be started after the parent
+ * file system is closed.
+ *
+ * All instances of {@code AzureFileSystem} are opened for read-write access.
+ *
+ * For a more complete description of the uses for the constants described here, please see the instructions for opening
+ * and configuring a FileSystem in the docs of {@link FileSystemProvider}.
+ */
+public final class AzureFileSystem extends FileSystem {
+ private static final ClientLogger LOGGER = new ClientLogger(AzureFileSystem.class);
+
+ // Configuration constants for blob clients.
+ /**
+ * Expected type: String
+ */
+ public static final String AZURE_STORAGE_SHARED_KEY_CREDENTIAL = "AzureStorageSharedKeyCredential";
+
+ /**
+ * Expected type: String
+ */
+ public static final String AZURE_STORAGE_SAS_TOKEN_CREDENTIAL = "AzureStorageSasTokenCredential";
+
+ /**
+ * Expected type: String
+ */
+ public static final String AZURE_STORAGE_PUBLIC_ACCESS_CREDENTIAL = "AzureStoragePublicAccessCredential";
+
+ /**
+ * Expected type: com.azure.core.http.policy.HttpLogLevelDetail
+ */
+ public static final String AZURE_STORAGE_HTTP_LOG_DETAIL_LEVEL = "AzureStorageHttpLogDetailLevel";
+
+ /**
+ * Expected type: Integer
+ */
+ public static final String AZURE_STORAGE_MAX_TRIES = "AzureStorageMaxTries";
+
+ /**
+ * Expected type: Integer
+ */
+ public static final String AZURE_STORAGE_TRY_TIMEOUT = "AzureStorageTryTimeout";
+
+ /**
+ * Expected type: Long
+ */
+ public static final String AZURE_STORAGE_RETRY_DELAY_IN_MS = "AzureStorageRetryDelayInMs";
+
+ /**
+ * Expected type: Long
+ */
+ public static final String AZURE_STORAGE_MAX_RETRY_DELAY_IN_MS = "AzureStorageMaxRetryDelayInMs";
+
+ /**
+ * Expected type: com.azure.storage.common.policy.RetryPolicyType
+ */
+ public static final String AZURE_STORAGE_RETRY_POLICY_TYPE = "AzureStorageRetryPolicyType";
+
+ /**
+ * Expected type: String
+ */
+ public static final String AZURE_STORAGE_SECONDARY_HOST = "AzureStorageSecondaryHost";
+
+ /**
+ * Expected type: Long
+ */
+ public static final String AZURE_STORAGE_UPLOAD_BLOCK_SIZE = "AzureStorageUploadBlockSize";
+
+ /**
+ * Expected type: Integer
+ */
+ public static final String AZURE_STORAGE_MAX_CONCURRENCY_PER_REQUEST = "AzureStorageMaxConcurrencyPerRequest";
+
+ /**
+ * Expected type: Long
+ */
+ public static final String AZURE_STORAGE_PUT_BLOB_THRESHOLD = "AzureStoragePutBlobThreshold";
+
+ /**
+ * Expected type: Integer
+ */
+ public static final String AZURE_STORAGE_DOWNLOAD_RESUME_RETRIES = "AzureStorageDownloadResumeRetries";
+
+ static final String AZURE_STORAGE_HTTP_CLIENT = "AzureStorageHttpClient"; // undocumented; for test.
+ static final String AZURE_STORAGE_HTTP_POLICIES = "AzureStorageHttpPolicies"; // undocumented; for test.
+
+ /**
+ * Expected type: String
+ */
+ public static final String AZURE_STORAGE_FILE_STORES = "AzureStorageFileStores";
+
+ /**
+ * Expected type: Boolean
+ */
+ public static final String AZURE_STORAGE_SKIP_INITIAL_CONTAINER_CHECK = "AzureStorageSkipInitialContainerCheck";
+
+ static final String PATH_SEPARATOR = "/";
+
+ private static final Map<Class<? extends FileAttributeView>, String> SUPPORTED_ATTRIBUTE_VIEWS;
+ * After a file system is closed then all subsequent access to the file system, either by methods defined by this
+ * class or on objects associated with this file system, throw ClosedFileSystemException. If the file system is
+ * already closed then invoking this method has no effect.
+ *
+ * Closing the file system will not block on outstanding operations. Any operations in progress will be allowed to
+ * terminate naturally after the file system is closed, though no further operations may be started after the
+ * parent file system is closed.
+ *
+ * Once closed, a file system with the same identifier as the one closed may be re-opened.
+ *
+ * @throws IOException If an I/O error occurs.
+ */
+ @Override
+ public void close() throws IOException {
+ this.closed = true;
+ this.parentFileSystemProvider.closeFileSystem(this.getFileSystemUrl() + "/" + defaultFileStore.name());
+ }
+
+ /**
+ * Tells whether this file system is open.
+ *
+ * @return whether this file system is open.
+ */
+ @Override
+ public boolean isOpen() {
+ return !this.closed;
+ }
+
+ /**
+ * Tells whether this file system allows only read-only access to its file stores.
+ *
+ * Always returns false. It may be the case that the authentication method provided to this file system only
+ * supports read operations and hence the file system is implicitly read only in this view, but that does not
+ * imply the underlying account/file system is inherently read only. Creating/specifying read only file
+ * systems is not supported.
+ *
+ * @return false
+ */
+ @Override
+ public boolean isReadOnly() {
+ return false;
+ }
+
+ /**
+ * Returns the name separator, represented as a string.
+ *
+ * The separator used in this file system is {@code "/"}.
+ *
+ * @return "/"
+ */
+ @Override
+ public String getSeparator() {
+ return AzureFileSystem.PATH_SEPARATOR;
+ }
+
+ /**
+ * Returns an object to iterate over the paths of the root directories.
+ *
+ * The list of root directories corresponds to the list of available file stores and therefore containers specified
+ * upon initialization. A root directory always takes the form {@code "<file-store-name>:"}.
+ * If a finite list of containers was provided on start up, this list will not change during the lifetime of this
+ * object. If containers are added to the account after initialization, they will be ignored. If a container is
+ * deleted or otherwise becomes unavailable, its root directory will still be returned but operations to it will
+ * fail.
+ *
+ * @return an object to iterate over the paths of the root directories
+ */
+ @Override
+ public Iterable
+ * This list will respect the parameters provided during initialization.
+ *
+ * If a finite list of containers was provided on start up, this list will not change during the lifetime of this
+ * object. If containers are added to the account after initialization, they will be ignored. If a container is
+ * deleted or otherwise becomes unavailable, its root directory will still be returned but operations to it will
+ * fail.
+ */
+ @Override
+ public Iterable
+ * This file system supports the following views:
+ *
+ * If more does not specify any elements then the value of the first parameter is the path string to convert. If
+ * more specifies one or more elements then each non-empty string, including first, is considered to be a sequence
+ * of name elements (see Path) and is joined to form a path string. The elements of more will be joined using the
+ * name separator.
+ *
+ * Each name element will be {@code String}-joined to the other elements by this file system's path separator.
+ * Naming conventions and allowed characters are as
+ * defined
+ * by the Azure Blob Storage service. The root component is interpreted as the container name and all name elements
+ * are interpreted as a part of the blob name. The character {@code ':'} is only allowed in the root component and
+ * must be the last character of the root component.
+ *
+ * @param first the path string or initial part of the path string
+ * @param more additional strings to be joined to form the path string
+ * @throws InvalidPathException if the path string cannot be converted.
+ */
+ @Override
+ public Path getPath(String first, String... more) {
+ return new AzurePath(this, first, more);
+ }
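+
+ /*
+ * Illustrative sketch (added; container and blob names hypothetical): per the javadoc above, the root component
+ * names the container and ends with ':', and the remaining elements form the blob name.
+ *
+ * Path p = fs.getPath("mycontainer:", "dir", "file.txt");
+ * // Equivalent to fs.getPath("mycontainer:/dir/file.txt")
+ */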
+
+ /**
+ * Unsupported.
+ *
+ * @param s the syntax and pattern string for the matcher
+ * @throws UnsupportedOperationException unsupported.
+ * @throws IllegalArgumentException never
+ * @throws PatternSyntaxException never
+ */
+ @Override
+ public PathMatcher getPathMatcher(String s) throws IllegalArgumentException, PatternSyntaxException {
+ throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException());
+ }
+
+ /**
+ * Unsupported.
+ *
+ * @throws UnsupportedOperationException unsupported.
+ */
+ @Override
+ public UserPrincipalLookupService getUserPrincipalLookupService() {
+ throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException());
+ }
+
+ /**
+ * Unsupported.
+ *
+ * @throws UnsupportedOperationException unsupported.
+ * @throws IOException Never thrown.
+ */
+ @Override
+ public WatchService newWatchService() throws IOException {
+ throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException());
+ }
+
+ String getFileSystemUrl() {
+ return this.blobServiceClient.getAccountUrl();
+ }
+
+ BlobServiceClient getBlobServiceClient() {
+ return this.blobServiceClient;
+ }
+
+ private BlobServiceClient buildBlobServiceClient(String endpoint, Map
+ * Particular care should be taken when working with a remote storage service. This implementation makes no guarantees
+ * on behavior or state should other processes operate on the same data concurrently; file systems from this provider
+ * will assume they have exclusive access to their data and will behave without regard for the potential of interfering
+ * applications. Moreover, remote file stores introduce higher latencies. Therefore, additional consideration should be
+ * given to managing concurrency: race conditions are more likely to manifest and network failures occur more frequently
+ * than disk failures. These and other such distributed application scenarios must be considered when working with this
+ * file system. While the {@code AzureFileSystem} will ensure it takes appropriate steps towards robustness and
+ * reliability, the application developer must design around these failure scenarios and have fallback and retry options
+ * available.
+ *
+ * The Azure Blob Storage service backing these APIs is not a true FileSystem, nor is it the goal of this implementation
+ * to force Azure Blob Storage to act like a full-fledged file system. Some APIs and scenarios will remain unsupported
+ * indefinitely until they may be sensibly implemented. Other APIs may experience lower performance than is expected
+ * because of the number of network requests needed to ensure correctness. The javadocs for each type and method should
+ * also be read carefully to understand what guarantees are made and how they may differ from the contract defined by
+ * {@link FileSystemProvider}.
+ *
+ * The scheme for this provider is {@code "azb"}, and the format of the URI to identify an {@code AzureFileSystem} is
+ * {@code "azb://?endpoint=
+ * An {@link AzureFileSystem} is backed by an account. An {@link AzureFileStore} is backed by a container. Any number of
+ * containers may be specified as file stores upon creation of the file system. When a file system is created,
+ * it will try to retrieve the properties of each container to ensure connection to the account. If any of the
+ * containers does not exist, it will be created. Failure to access or create containers as necessary will result in
+ * an exception and failure to create the file system. Any data existing in the containers will be preserved and
+ * accessible via the file system, though customers should be aware that it must be in a format understandable by
+ * the types in this package or behavior will be undefined.
+ *
+ * {@link #newFileSystem(URI, Map)} will check for the following keys in the configuration map and expect the named
+ * types. Any entries not listed here will be ignored. Note that {@link AzureFileSystem} has public constants defined
+ * for each of the keys for convenience. Most values are documented in the blob package. Any values which are unique to
+ * nio will be documented here.
+ *
+ * Either an account key or a sas token must be specified. If both are provided, the account key will be preferred. If
+ * a sas token is specified, the customer must take care that it has appropriate permissions to perform the actions
+ * demanded of the file system in a given workflow, including the initial connection check specified above. The same
+ * token will be applied to all operations.
+ *
+ * An iterable of file stores must also be provided; each entry should simply be the name of a container. The first
+ * container listed will be considered the default file store, and its root directory will be the file system's
+ * default directory. All other values listed are used to configure the underlying
+ * {@link com.azure.storage.blob.BlobServiceClient}. Please refer to that type for more information on these values.
+ *
+ * @see FileSystemProvider
+ */
+public final class AzureFileSystemProvider extends FileSystemProvider {
+ /*
+ * A static inner class is used to hold the ClientLogger for AzureFileSystemProvider to defer creating the
+ * ClientLogger until logging is needed. Some implementations of SLF4J may themselves attempt to load
+ * FileSystemProviders, causing a load of FileSystemProviders to occur during a load of FileSystemProviders. This
+ * causes the JVM to throw an exception indicating that a circular load of FileSystemProviders has occurred.
+ */
+ private static final class ClientLoggerHolder {
+ private static final ClientLogger LOGGER = new ClientLogger(AzureFileSystemProvider.class);
+ }
+
+ /**
+ * A helper for setting the HTTP properties when creating a directory.
+ */
+ public static final String CONTENT_TYPE = "Content-Type";
+
+ /**
+ * A helper for setting the HTTP properties when creating a directory.
+ */
+ public static final String CONTENT_DISPOSITION = "Content-Disposition";
+
+ /**
+ * A helper for setting the HTTP properties when creating a directory.
+ */
+ public static final String CONTENT_LANGUAGE = "Content-Language";
+
+ /**
+ * A helper for setting the HTTP properties when creating a directory.
+ */
+ public static final String CONTENT_ENCODING = "Content-Encoding";
+
+ /**
+ * A helper for setting the HTTP properties when creating a directory.
+ */
+ public static final String CONTENT_MD5 = "Content-MD5";
+
+ /**
+ * A helper for setting the HTTP properties when creating a directory.
+ */
+ public static final String CACHE_CONTROL = "Cache-Control";
+
+ private static final String ENDPOINT_QUERY_KEY = "endpoint";
+ private static final int COPY_TIMEOUT_SECONDS = 30;
+ private static final Set
+ * The format of a {@code URI} identifying a file system is {@code "azb://?endpoint=<endpoint>"}.
+ * Once closed, a file system with the same identifier may be reopened.
+ *
+ * @param uri URI reference
+ * @param config A map of provider specific properties to configure the file system
+ * @return a new file system.
+ * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the config parameter
+ * does not contain properties required by the provider, or a property value is invalid.
+ * @throws IOException If an I/O error occurs.
+ * @throws SecurityException never
+ * @throws FileSystemAlreadyExistsException If the file system has already been created.
+ */
+ @Override
+ public FileSystem newFileSystem(URI uri, Map
+ * The format of a {@code URI} identifying a file system is {@code "azb://?endpoint=<endpoint>"}.
+ *
+ * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
+ * file system with the same identifier may be reopened.
+ *
+ * @param uri URI reference
+ * @return the file system
+ * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met
+ * @throws FileSystemNotFoundException If the file system does not exist or has already been closed
+ * @throws SecurityException never
+ */
+ @Override
+ public FileSystem getFileSystem(URI uri) {
+ String endpoint = extractAccountEndpoint(uri);
+ if (!this.openFileSystems.containsKey(endpoint)) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new FileSystemNotFoundException("Name: " + endpoint));
+ }
+ return this.openFileSystems.get(endpoint);
+ }
+
+ /**
+ * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already
+ * exists.
+ *
+ * @param uri The URI to convert
+ * @return The path identified by the URI.
+ * @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the
+ * uri parameter do not hold
+ * @throws FileSystemNotFoundException if the file system identified by the query does not exist
+ * @throws SecurityException never
+ *
+ * @see #getFileSystem(URI) for information on the URI format
+ */
+ @Override
+ public Path getPath(URI uri) {
+ return getFileSystem(uri).getPath(uri.getPath());
+ }
+
+ /**
+ * Opens or creates a file, returning a seekable byte channel to access the file.
+ *
+ * This method is primarily offered to support some jdk convenience methods such as
+ * {@link Files#createFile(Path, FileAttribute[])}, which require opening a channel and closing it. A channel may
+ * only be opened in read mode OR write mode. It may not be opened in read/write mode. Seeking is supported for
+ * reads, but not for writes. Modifying existing files is not permitted--only creating new files or
+ * overwriting existing files.
+ *
+ * This type is not threadsafe to prevent having to hold locks across network calls.
+ *
+ * @param path the path of the file to open
+ * @param set options specifying how the file should be opened
+ * @param fileAttributes an optional list of file attributes to set atomically when creating the file
+ * @return a new seekable byte channel
+ * @throws UnsupportedOperationException Operation is not supported.
+ * @throws IllegalArgumentException if the set contains an invalid combination of options
+ * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified
+ * (optional specific exception)
+ * @throws IOException If an I/O error occurs.
+ * @throws SecurityException never
+ */
+ @Override
+ public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
+ FileAttribute<?>... fileAttributes) throws IOException {
+ if (Objects.isNull(set)) {
+ set = Collections.emptySet();
+ }
+
+ if (set.contains(StandardOpenOption.WRITE)) {
+ return new AzureSeekableByteChannel(
+ (NioBlobOutputStream) this.newOutputStreamInternal(path, set, fileAttributes), path);
+ } else {
+ return new AzureSeekableByteChannel(
+ (NioBlobInputStream) this.newInputStream(path, set.toArray(new OpenOption[0])), path);
+ }
+ }
+
+ /**
+ * Opens an {@link InputStream} to the given path.
+ *
+ * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always
+ * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are
+ * supported.
+ *
+ * Only {@link StandardOpenOption#READ} is supported. Any other option will throw.
+ *
+ * @param path the path to the file to open
+ * @param options options specifying how the file is opened
+ * @return a new input stream
+ * @throws IllegalArgumentException if an invalid combination of options is specified
+ * @throws UnsupportedOperationException if an unsupported option is specified
+ * @throws IOException If an I/O error occurs.
+ * @throws SecurityException never
+ */
+ @Override
+ public InputStream newInputStream(Path path, OpenOption... options) throws IOException {
+ // Validate options. Only read is supported.
+ if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new UnsupportedOperationException("Only the read option is supported."));
+ }
+
+ AzureResource resource = new AzureResource(path);
+ AzurePath.ensureFileSystemOpen(resource.getPath());
+
+ // Ensure the path points to a file.
+ if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new IOException("Path either does not exist or points to a directory."
+ + "Path must point to a file. Path: " + path.toString()));
+ }
+
+ // Note that methods on BlobInputStream are already synchronized.
+ return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath());
+ }
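+
+ /*
+ * Illustrative sketch (added; the path is a hypothetical placeholder): only StandardOpenOption.READ is supported,
+ * so a plain Files.newInputStream call is the typical entry point. Mark and reset are supported on the stream.
+ *
+ * try (InputStream in = Files.newInputStream(fs.getPath("mycontainer:", "data.txt"))) {
+ * byte[] chunk = in.readNBytes(4096);
+ * }
+ */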
+
+ /**
+ * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob.
+ *
+ * The only supported options are {@link StandardOpenOption#CREATE}, {@link StandardOpenOption#CREATE_NEW},
+ * {@link StandardOpenOption#WRITE}, {@link StandardOpenOption#TRUNCATE_EXISTING}. Any other options will throw an
+ * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an
+ * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely.
+ *
+ * This stream will not attempt to buffer the entire file, however some buffering will be done for potential
+ * optimizations and to avoid network thrashing. Specifically, up to
+ * {@link AzureFileSystem#AZURE_STORAGE_PUT_BLOB_THRESHOLD} bytes will be buffered initially. If that threshold is
+ * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of
+ * {@link AzureFileSystem#AZURE_STORAGE_UPLOAD_BLOCK_SIZE}. The maximum number of buffers of this size to be
+ * allocated is defined by {@link AzureFileSystem#AZURE_STORAGE_MAX_CONCURRENCY_PER_REQUEST}, which also configures
+ * the level of parallelism with which we may write and thus may affect write speeds as well.
+ *
+ * The data is only committed when the stream is closed. Hence, data cannot be read from the destination until the
+ * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized
+ * and available for reading.
+ *
+ * Writing happens asynchronously. Bytes passed for writing are stored until either the threshold or block size is
+ * met, at which time they are sent to the service. When the write method returns, there is no guarantee about which
+ * phase of this process the data is in other than it has been accepted and will be written. Again, closing will
+ * guarantee that the data is written and available.
+ *
+ * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors.
+ * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write
+ * may not otherwise be thrown unless the stream is flushed, closed, or written to again.
+ *
+ * @param path the path to the file to open or create
+ * @param options options specifying how the file is opened
+ * @return a new output stream
+ * @throws IllegalArgumentException if an invalid combination of options is specified
+ * @throws UnsupportedOperationException if an unsupported option is specified
+ * @throws IOException If an I/O error occurs.
+ * @throws SecurityException never
+ */
+ @Override
+ public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException {
+ return newOutputStreamInternal(path, new HashSet<>(Arrays.asList(options)));
+ }
+
+ OutputStream newOutputStreamInternal(Path path, Set<? extends OpenOption> optionsSet,
+ FileAttribute<?>... fileAttributes) throws IOException {
+ // If options are empty, add Create, Write, TruncateExisting as defaults per nio docs.
+ if (optionsSet == null || optionsSet.isEmpty()) {
+ optionsSet = OUTPUT_STREAM_DEFAULT_OPTIONS;
+ }
+
+ // Check for unsupported options.
+ for (OpenOption option : optionsSet) {
+ if (!OUTPUT_STREAM_SUPPORTED_OPTIONS.contains(option)) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new UnsupportedOperationException("Unsupported option: " + option.toString()));
+ }
+ }
+
+ /*
+ Write must be specified. Either create_new or truncate must be specified. This is to ensure that no edits or
+ appends are allowed.
+ */
+ if (!optionsSet.contains(StandardOpenOption.WRITE)
+ || !(optionsSet.contains(StandardOpenOption.TRUNCATE_EXISTING)
+ || optionsSet.contains(StandardOpenOption.CREATE_NEW))) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new IllegalArgumentException("Write and either CreateNew or TruncateExisting must be specified to open "
+ + "an OutputStream"));
+ }
+
+ AzureResource resource = new AzureResource(path);
+ AzurePath.ensureFileSystemOpen(resource.getPath());
+ DirectoryStatus status = resource.checkDirStatus();
+
+ // Cannot write to a directory.
+ if (DirectoryStatus.isDirectory(status)) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString()));
+ }
+
+ // Writing to an empty location requires a create option.
+ if (status.equals(DirectoryStatus.DOES_NOT_EXIST)
+ && !(optionsSet.contains(StandardOpenOption.CREATE)
+ || optionsSet.contains(StandardOpenOption.CREATE_NEW))) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new IOException("Writing to an empty location requires a create option. Path: " + path.toString()));
+ }
+
+ // Cannot write to an existing file if create new was specified.
+ if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsSet.contains(StandardOpenOption.CREATE_NEW)) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new IOException("A file already exists at this location and "
+ + "CREATE_NEW was specified. Path: " + path.toString()));
+ }
+
+ // Create options based on file system config
+ AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem());
+ Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
+ Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue();
+ ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null,
+ putBlobThreshold);
+
+ // Add an extra etag check for create new
+ BlobRequestConditions rq = null;
+ if (optionsSet.contains(StandardOpenOption.CREATE_NEW)) {
+ rq = new BlobRequestConditions().setIfNoneMatch("*");
+ }
+
+ // For parsing properties and metadata
+ if (fileAttributes == null) {
+ fileAttributes = new FileAttribute<?>[0];
+ }
+ resource.setFileAttributes(Arrays.asList(fileAttributes));
+
+ return new NioBlobOutputStream(resource.getBlobOutputStream(pto, rq), resource.getPath());
+ }
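+
+ /*
+ * Illustrative sketch (added; the path is a hypothetical placeholder): calling Files.newOutputStream with no
+ * options applies the CREATE/WRITE/TRUNCATE_EXISTING defaults noted above; data is only committed once the
+ * stream is closed.
+ *
+ * try (OutputStream out = Files.newOutputStream(fs.getPath("mycontainer:", "out.txt"))) {
+ * out.write("hello".getBytes(StandardCharsets.UTF_8));
+ * } // close() flushes remaining buffers and commits the blob
+ */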
+
+ /**
+ * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by
+ * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path
+ * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the
+ * iterator are filtered by the given filter.
+ *
+ * When not using the try-with-resources construct, then the directory stream's close method should be invoked after
+ * iteration is completed to free any resources held for the open directory.
+ *
+ * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or
+ * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a
+ * DirectoryIteratorException with the IOException as the cause.
+ *
+ * @param path the path to the directory
+ * @param filter the directory stream filter
+ * @return a new and open {@code DirectoryStream} object
+ * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
+ * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory
+ * @throws IOException If an I/O error occurs.
+ * @throws SecurityException never
+ */
+ @Override
+ public DirectoryStream
+ * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. Weak existence is
+ * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
+ * known as a virtual directory and enables the file system to work with containers that were pre-loaded
+ * with data by another source but need to be accessed by this file system. Strong existence is defined as
+ * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
+ * blob whose name is the directory path with a particular metadata field indicating the blob's status as a
+ * directory. This is also known as a concrete directory. Directories created by this file system will
+ * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
+ * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
+ * prefix.
+ *
+ * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
+ * directory if it does not exist are a single operation that is atomic with respect to all other filesystem
+ * activities that might affect the directory." More specifically, this method will atomically check for strong
+ * existence of another file or directory at the given path and fail if one is present. On the other hand, we
+ * only check for weak existence of the parent to determine if the given path is valid. Additionally, the
+ * action of checking whether the parent exists, is not atomic with the creation of the directory. Note that
+ * while it is possible that the parent may be deleted between when the parent is determined to exist and the
+ * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
+ * child will never be left floating and unreachable. The different checks on parent and child are due to limitations
+ * in the Storage service API.
+ *
+ * There may be some unintuitive behavior when working with directories in this file system, particularly virtual
+ * directories (usually those not created by this file system). A virtual directory will disappear as soon as all
+ * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of
+ * calling this method, this method will still return success and create a concrete directory at the target
+ * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is
+ * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while
+ * creating a concrete directory and because such behavior will have minimal side effects--no files will be
+ * overwritten and the directory will still be available for writing as intended, though it may not be empty. This
+ * is not a complete list of such unintuitive behavior.
+ *
+ * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
+ * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
+ * converted to a {@code String} except the Content-MD5 attribute which expects a {@code byte[]}.
+ * When extracting the content headers, the following strings will be used for comparison (constants for these
+ * values can be found on this type):
+ *
+ * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by
+ * another process, and doing so will not immediately invalidate any channels open to that file--they will simply
+ * start to fail. Root directories cannot be deleted even when empty.
+ *
+ * @param path the path to the file to delete
+ * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
+ * @throws NoSuchFileException if the file does not exist
+ * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the
+ * directory is not empty
+ * @throws IOException If an I/O error occurs.
+ * @throws SecurityException never
+ */
+ @Override
+ public void delete(Path path) throws IOException {
+ // Basic validation. Must be an AzurePath. Cannot be a root.
+ AzureResource azureResource = new AzureResource(path);
+ AzurePath.ensureFileSystemOpen(azureResource.getPath());
+
+ // Check directory status--possibly throw DirectoryNotEmpty or NoSuchFile.
+ DirectoryStatus dirStatus = azureResource.checkDirStatus();
+ if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new NoSuchFileException(path.toString()));
+ }
+ if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new DirectoryNotEmptyException(path.toString()));
+ }
+
+ // After all validation has completed, delete the resource.
+ try {
+ azureResource.getBlobClient().delete();
+ } catch (BlobStorageException e) {
+ if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new NoSuchFileException(path.toString()));
+ }
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new IOException(e));
+ }
+ }
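+
+ /*
+ * Illustrative sketch (added; the path is a hypothetical placeholder): the validation order above means callers
+ * see NoSuchFileException or DirectoryNotEmptyException before any delete is attempted.
+ *
+ * try {
+ * Files.delete(fs.getPath("mycontainer:", "old.txt"));
+ * } catch (NoSuchFileException e) {
+ * // nothing to delete
+ * }
+ */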
+
+ /**
+ * Copies the resource at the source location to the destination.
+ *
+ * This method is not atomic with respect to other file system operations. More specifically, the checks necessary
+ * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy
+ * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination.
+ *
+ * In addition to those in the docs for {@link FileSystemProvider#copy(Path, Path, CopyOption...)}, this method has
+ * the following requirements for successful completion. {@link StandardCopyOption#COPY_ATTRIBUTES} must be passed
+ * as it is impossible not to copy blob properties; if this option is not passed, an
+ * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root
+ * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent
+ * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown.
+ * The only supported option other than {@link StandardCopyOption#COPY_ATTRIBUTES} is
+ * {@link StandardCopyOption#REPLACE_EXISTING}; the presence of any other option will result in an
+ * {@link UnsupportedOperationException}.
+ *
+ * This method supports both virtual and concrete directories as both the source and destination. Unlike when
+ * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
+ * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
+ * mentioned above, this check is not atomic with the creation of the resultant directory.
+ *
+ * @param source the path to the file to copy
+ * @param destination the path to the target file
+ * @param copyOptions specifying how the copy should be done
+ * @throws UnsupportedOperationException if the array contains a copy option that is not supported
+ * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING
+ * option is not specified
+ * @throws DirectoryNotEmptyException if the REPLACE_EXISTING option is specified but the file cannot be replaced
+ * because it is a non-empty directory
+ * @throws IOException If an I/O error occurs.
+ * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
+ * @throws SecurityException never
+ * @see #createDirectory(Path, FileAttribute[]) for more information about directory existence.
+ */
+ @Override
+ public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
+ // If paths point to the same file, operation is a no-op.
+ if (source.equals(destination)) {
+ return;
+ }
+
+ // Read and validate options.
+ // Remove accepted options as we find them. Anything left we don't support.
+ boolean replaceExisting = false;
+ List
+ * This method may only be used to check the existence of a file. It is not possible to determine the permissions
+ * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be
+ * thrown.
+ *
+ * @param path the path to the file to check
+ * @param accessModes The access modes to check; may have zero elements
+ * @throws NoSuchFileException if a file does not exist
+ * @throws java.nio.file.AccessDeniedException if the requested access would be denied or the access cannot be
+ * determined because the Java virtual machine has insufficient privileges or other reasons
+ * @throws IOException If an I/O error occurs.
+ * @throws SecurityException never
+ */
+ @Override
+ public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
+ if (accessModes != null && accessModes.length != 0) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new AccessDeniedException("The access cannot be determined."));
+ }
+ AzurePath.ensureFileSystemOpen(path);
+
+ /*
+ Some static utility methods in the jdk require checking access on a root. ReadAttributes is not supported on
+ roots as they are containers. Furthermore, we always assume that roots exist as they are verified at creation
+ and cannot be deleted by the file system. Thus, we prefer a short circuit for roots.
+ */
+ if (path instanceof AzurePath && ((AzurePath) path).isRoot()) {
+ return;
+ }
+
+ // Read attributes already wraps BlobStorageException in an IOException.
+ try {
+ readAttributes(path, BasicFileAttributes.class);
+ } catch (IOException e) {
+ Throwable cause = e.getCause();
+ if (cause instanceof BlobStorageException
+ && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) cause).getErrorCode())) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new NoSuchFileException(path.toString()));
+ } else {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, e);
+ }
+ }
+ }
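+
+ /*
+ * Illustrative sketch (added; the path is a hypothetical placeholder): because any explicit AccessMode is
+ * rejected, existence checks should go through Files.exists rather than Files.isReadable/isWritable, whose
+ * permission modes cannot be determined by this provider.
+ *
+ * boolean present = Files.exists(fs.getPath("mycontainer:", "data.txt"));
+ */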
+
+ /**
+ * Returns a file attribute view of a given type.
+ *
+ * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information.
+ *
+ * Reading attributes on a virtual directory will return {@code null} for most properties other than
+ * {@link AzureBlobFileAttributes#isVirtualDirectory()}, which will return true. See
+ * {@link #createDirectory(Path, FileAttribute[])} for more information on virtual directories.
+ *
+ * @param path the path to the file
+ * @param type the Class object corresponding to the file attribute view
+ * @param linkOptions ignored
+ * @return a file attribute view of the specified type, or null if the attribute view type is not available
+ */
+ @Override
+ @SuppressWarnings("unchecked")
+ public
+ * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information.
+ *
+ * Reading attributes on a virtual directory will return {@code null} for most properties other than
+ * {@link AzureBlobFileAttributes#isVirtualDirectory()}, which will return true. See
+ * {@link #createDirectory(Path, FileAttribute[])} for more information on virtual directories.
+ *
+ * @param path the path to the file
+ * @param type the Class of the file attributes required to read
+ * @param linkOptions ignored
+ * @return the file attributes
+ * @throws UnsupportedOperationException if attributes of the given type are not supported
+ * @throws IOException If an I/O error occurs.
+ * @throws SecurityException never
+ */
+ @Override
+ @SuppressWarnings("unchecked")
+ public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions)
+ throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+
+ Class<? extends BasicFileAttributeView> view;
+ if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) {
+ view = AzureBasicFileAttributeView.class;
+ } else if (type == AzureBlobFileAttributes.class) {
+ view = AzureBlobFileAttributeView.class;
+ } else {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new UnsupportedOperationException());
+ }
+
+ /*
+ Resource validation will happen in readAttributes of the view. We don't want to double-check, and checking
+ internal to the view ensures it is always checked no matter which code path is taken.
+ */
+ return (A) getFileAttributeView(path, view, linkOptions).readAttributes();
+ }
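+
+ /*
+ * Illustrative sketch (added; the path is a hypothetical placeholder): the three accepted attribute classes
+ * funnel to the two views listed above.
+ *
+ * BasicFileAttributes attrs = Files.readAttributes(fs.getPath("mycontainer:", "data.txt"),
+ * BasicFileAttributes.class);
+ * long size = attrs.size();
+ */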
+
+ /**
+ * Reads a set of file attributes as a bulk operation.
+ *
+ * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information.
+ *
+ * Reading attributes on a virtual directory will return {@code null} for all properties other than
+ * {@link AzureBlobFileAttributes#isVirtualDirectory()}, which will return true. See
+ * {@link #createDirectory(Path, FileAttribute[])} for more information on virtual directories.
+ *
+ * @param path the path to the file
+ * @param attributes the attributes to read
+ * @param linkOptions ignored
+ * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are
+ * the attribute values
+ * @throws UnsupportedOperationException if attributes of the given type are not supported
+ * @throws IllegalArgumentException if no attributes are specified or an unrecognized attribute is specified
+ * @throws IOException If an I/O error occurs.
+ * @throws SecurityException never
+ */
+ @Override
+ public Map
+ * See {@link AzureBlobFileAttributeView} for more information.
+ *
+ * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See
+ * {@link #createDirectory(Path, FileAttribute[])} for more information on virtual directories.
+ *
+ * @param path the path to the file
+ * @param attributes the attribute to set
+ * @param value the attribute value
+ * @param linkOptions ignored
+ * @throws UnsupportedOperationException if an attribute view is not available
+ * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute
+ * value is of the correct type but has an inappropriate value
+ * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing
+ * elements that are not of the expected type
+ * @throws IOException If an I/O error occurs.
+ * @throws SecurityException never
+ */
+ @Override
+ public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ String viewType;
+ String attributeName;
+ String[] parts = attributes.split(":");
+ if (parts.length > 2) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new IllegalArgumentException("Invalid format for attribute string: " + attributes));
+ }
+ if (parts.length == 1) {
+ viewType = "basic"; // Per jdk docs.
+ attributeName = attributes;
+ } else {
+ viewType = parts[0];
+ attributeName = parts[1];
+ }
+
+ /*
+ For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs
+ state that "basic" must be supported, so we funnel to azureBasic.
+ */
+ if ("basic".equals(viewType)) {
+ viewType = AzureBasicFileAttributeView.NAME;
+ }
+
+ // We don't actually support any setters on the basic view.
+ if (viewType.equals(AzureBasicFileAttributeView.NAME)) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new IllegalArgumentException("Invalid attribute. View: " + viewType
+ + ". Attribute: " + attributeName));
+ } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) {
+ Map
+ * The root component, if it is present, is the first element of the path and is denoted by a {@code ':'} as the last
+ * character. Hence, only one instance of {@code ':'} may appear in a path string, and it may only be the last character
+ * of the first element in the path. The root component is used to identify which container a path belongs to. All other
+ * path elements, including separators, are considered as the blob name. {@link AzurePath#fromBlobUrl} may
+ * be used to convert a typical http url pointing to a blob into an {@code AzurePath} object pointing to the same
+ * resource.
+ *
+ * Constructing a syntactically valid path does not ensure a resource exists at the given path. An error will
+ * not be thrown until trying to access an invalid resource, e.g. trying to access a resource that does not exist.
+ *
+ * Path names are case-sensitive.
+ *
+ * If a resource is accessed via a relative path, it will be resolved against the default directory of the file system.
+ * The default directory is as defined in the {@link AzureFileSystem} docs.
+ *
+ * Leading and trailing separators will be stripped from each component passed to
+ * {@link AzureFileSystem#getPath(String, String...)}. This has the effect of treating "foo/" as though it were simply
+ * "foo".
+ */
+public final class AzurePath implements Path {
+ private static final ClientLogger LOGGER = new ClientLogger(AzurePath.class);
+ static final String ROOT_DIR_SUFFIX = ":";
+
+ private final AzureFileSystem parentFileSystem;
+ private final String pathString;
+
+ AzurePath(AzureFileSystem parentFileSystem, String first, String... more) {
+ this.parentFileSystem = parentFileSystem;
+
+ /*
+ Break all strings into their respective elements and remove empty elements. This has the effect of stripping
+ any trailing, leading, or internal delimiters so there are no duplicates/empty elements when we join.
+ */
+ List
+ * An absolute path is complete in that it doesn't need to be combined with other path information in order to
+ * locate a file. A path is considered absolute in this file system if it contains a root component.
+ *
+ * @return whether the path is absolute
+ */
+ @Override
+ public boolean isAbsolute() {
+ return this.getRoot() != null;
+ }
+
+ /**
+ * Returns the root component of this path as a Path object, or null if this path does not have a root component.
+ *
+ * The root component of this path also identifies the Azure Storage Container in which the file is stored. This
+ * method will not validate that the root component corresponds to an actual file store/container in this
+ * file system. It will simply return the root component of the path if one is present and syntactically valid.
+ *
+ * @return a path representing the root component of this path, or null
+ */
+ @Override
+ public Path getRoot() {
+ // Check if the first element of the path is formatted like a root directory.
+ String[] elements = this.splitToElements();
+ if (elements.length > 0 && elements[0].endsWith(ROOT_DIR_SUFFIX)) {
+ return this.parentFileSystem.getPath(elements[0]);
+ }
+ return null;
+ }
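+
+ /*
+ * Illustrative sketch (added; names hypothetical): the root component doubles as the container identifier.
+ *
+ * Path p = fs.getPath("mycontainer:/dir/file.txt");
+ * Path root = p.getRoot(); // "mycontainer:"
+ * assert p.isAbsolute();
+ */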
+
+ /**
+ * Returns the name of the file or directory denoted by this path as a Path object. The file name is the farthest
+ * element from the root in the directory hierarchy.
+ *
+ * @return a path representing the name of the file or directory, or null if this path has zero elements
+ */
+ @Override
+ public Path getFileName() {
+ if (this.isRoot()) {
+ return null;
+ } else if (this.pathString.isEmpty()) {
+ return this;
+ } else {
+ List
+ * The parent of this path object consists of this path's root component, if any, and each element in the path
+ * except for the farthest from the root in the directory hierarchy. This method does not access the file system;
+ * the path or its parent may not exist. Furthermore, this method does not eliminate special names such as "." and
+ * ".." that may be used in some implementations. On UNIX for example, the parent of "/a/b/c" is "/a/b", and the
+ * parent of "x/y/." is "x/y". This method may be used with the normalize method, to eliminate redundant names, for
+ * cases where shell-like navigation is required.
+ *
+ * If this path has one or more elements, and no root component, then this method is equivalent to evaluating the
+ * expression:
+ *
+ * {@code subpath(0, getNameCount()-1);}
+ *
+ * @return a path representing the path's parent
+ */
+ @Override
+ public Path getParent() {
+ /*
+ If this path only has one element or is empty, there is no parent. Note the root is included in the parent, so
+ we don't use getNameCount here.
+ */
+ String[] elements = this.splitToElements();
+ if (elements.length == 1 || elements.length == 0) {
+ return null;
+ }
+
+ return this.parentFileSystem.getPath(
+ this.pathString.substring(0, this.pathString.lastIndexOf(this.parentFileSystem.getSeparator())));
+ }
+
+ /**
+ * Returns the number of name elements in the path.
+ *
+ * @return the number of elements in the path, or 0 if this path only represents a root component
+ */
+ @Override
+ public int getNameCount() {
+ if (this.pathString.isEmpty()) {
+ return 1;
+ }
+ return this.splitToElements(this.withoutRoot()).length;
+ }
+
+ /**
+ * Returns a name element of this path as a Path object.
+ *
+ * The index parameter is the index of the name element to return. The element that is closest to the root in the
+ * directory hierarchy has index 0. The element that is farthest from the root has index {@code count-1}.
+ *
+ * @param index the index of the element
+ * @return the name element
+ * @throws IllegalArgumentException if index is negative, index is greater than or equal to the number of elements,
+ * or this path has zero name elements
+ */
+ @Override
+ public Path getName(int index) {
+ if (index < 0 || index >= this.getNameCount()) {
+ throw LoggingUtility.logError(LOGGER, new IllegalArgumentException(String.format("Index %d is out of "
+ + "bounds", index)));
+ }
+ // If the path is empty, the only valid option is also an empty path.
+ if (this.pathString.isEmpty()) {
+ return this;
+ }
+
+ return this.parentFileSystem.getPath(this.splitToElements(this.withoutRoot())[index]);
+ }
+
+ /**
+ * Returns a relative Path that is a subsequence of the name elements of this path.
+ *
+ * The beginIndex and endIndex parameters specify the subsequence of name elements. The name that is closest to the
+ * root in the directory hierarchy has index 0. The name that is farthest from the root has index {@code count-1}.
+ * The returned Path object has the name elements that begin at beginIndex and extend to the element at index
+ * {@code endIndex-1}.
+ *
+ * @param begin the index of the first element, inclusive
+ * @param end the index of the last element, exclusive
+ * @return a new Path object that is a subsequence of the name elements in this Path
+ */
+ @Override
+ public Path subpath(int begin, int end) {
+ if (begin < 0 || begin >= this.getNameCount()
+ || end <= begin || end > this.getNameCount()) {
+ throw LoggingUtility.logError(LOGGER,
+ new IllegalArgumentException(String.format("Values of begin: %d and end: %d are invalid", begin, end)));
+ }
+
+ String[] subnames = Stream.of(this.splitToElements(this.withoutRoot()))
+ .skip(begin)
+ .limit(end - begin)
+ .toArray(String[]::new);
+
+ return this.parentFileSystem.getPath(String.join(this.parentFileSystem.getSeparator(), subnames));
+ }
+
+ /**
+ * Tests if this path starts with the given path.
+ *
+ * This path starts with the given path if this path's root component starts with the root component of the given
+ * path, and this path starts with the same name elements as the given path. If the given path has more name
+ * elements than this path then false is returned.
+ *
+ * If this path does not have a root component and the given path has a root component then this path does not start
+ * with the given path.
+ *
+ * If the given path is associated with a different FileSystem to this path then false is returned.
+ *
+ * In this implementation, a root component starts with another root component if the two root components are
+ * equivalent strings. In other words, if the files are stored in the same container.
+ *
+ * @param path the given path
+ * @return true if this path starts with the given path; otherwise false
+ */
+ @Override
+ public boolean startsWith(Path path) {
+ if (!path.getFileSystem().equals(this.parentFileSystem)) {
+ return false;
+ }
+
+ // An empty path never starts with another path and is never the start of another path.
+ if (this.pathString.isEmpty() ^ ((AzurePath) path).pathString.isEmpty()) {
+ return false;
+ }
+
+ String[] thisPathElements = this.splitToElements();
+ String[] otherPathElements = ((AzurePath) path).splitToElements();
+ if (otherPathElements.length > thisPathElements.length) {
+ return false;
+ }
+ for (int i = 0; i < otherPathElements.length; i++) {
+ if (!otherPathElements[i].equals(thisPathElements[i])) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ /**
+ * Tests if this path starts with a Path, constructed by converting the given path string, in exactly the manner
+ * specified by the startsWith(Path) method.
+ *
+ * @param path the given path string
+ * @return true if this path starts with the given path; otherwise false
+ * @throws InvalidPathException If the path string cannot be converted to a Path.
+ */
+ @Override
+ public boolean startsWith(String path) {
+ return this.startsWith(this.parentFileSystem.getPath(path));
+ }
+
+ /**
+ * Tests if this path ends with the given path.
+ *
+ * If the given path has N elements, and no root component, and this path has N or more elements, then this path
+ * ends with the given path if the last N elements of each path, starting at the element farthest from the root,
+ * are equal.
+ *
+ * If the given path has a root component then this path ends with the given path if the root component of this path
+ * ends with the root component of the given path, and the corresponding elements of both paths are equal. If this
+ * path does not have a root component and the given path has a root component then this path does not end with the
+ * given path.
+ *
+ * If the given path is associated with a different FileSystem to this path then false is returned.
+ *
+ * In this implementation, a root component ends with another root component if the two root components are
+ * equivalent strings. In other words, if the files are stored in the same container.
+ *
+ * @param path the given path
+ * @return true if this path ends with the given path; otherwise false
+ */
+ @Override
+ public boolean endsWith(Path path) {
+ /*
+ There can only be one instance of a file system with a given id, so comparing object identity is equivalent
+ to checking ids here.
+ */
+ if (path.getFileSystem() != this.parentFileSystem) {
+ return false;
+ }
+
+ // An empty path never ends with another path and is never the end of another path.
+ if (this.pathString.isEmpty() ^ ((AzurePath) path).pathString.isEmpty()) {
+ return false;
+ }
+
+ String[] thisPathElements = this.splitToElements();
+ String[] otherPathElements = ((AzurePath) path).splitToElements();
+ if (otherPathElements.length > thisPathElements.length) {
+ return false;
+ }
+ // If the given path has a root component, the paths must be equal.
+ if (path.getRoot() != null && otherPathElements.length != thisPathElements.length) {
+ return false;
+ }
+ for (int i = 1; i <= otherPathElements.length; i++) {
+ if (!otherPathElements[otherPathElements.length - i]
+ .equals(thisPathElements[thisPathElements.length - i])) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Tests if this path ends with a Path, constructed by converting the given path string, in exactly the manner
+ * specified by the endsWith(Path) method.
+ *
+ * @param path the given path string
+ * @return true if this path ends with the given path; otherwise false
+ * @throws InvalidPathException If the path string cannot be converted to a Path.
+ */
+ @Override
+ public boolean endsWith(String path) {
+ return this.endsWith(this.parentFileSystem.getPath(path));
+ }
+
+ /**
+ * Returns a path that is this path with redundant name elements eliminated.
+ *
+ * It derives from this path, a path that does not contain redundant name elements. The "." and ".." are special
+ * names used to indicate the current directory and parent directory. All occurrences of "." are considered
+ * redundant. If a ".." is preceded by a non-".." name then both names are considered redundant (the process to
+ * identify such names is repeated until it is no longer applicable).
+ *
+ * This method does not access the file system; the path may not locate a file that exists. Eliminating ".." and a
+ * preceding name from a path may result in a path that locates a different file than the original path.
+ *
+ * @return the resulting path or this path if it does not contain redundant name elements; an empty path is returned
+ * if this path does not have a root component and all name elements are redundant
+ *
+ */
+ @Override
+ public Path normalize() {
+ Deque
+ * If the other parameter is an absolute path then this method trivially returns other. If other is an empty path
+ * then this method trivially returns this path. Otherwise, this method considers this path to be a directory and
+ * resolves the given path against this path. In the simplest case, the given path does not have a root component,
+ * in which case this method joins the given path to this path and returns a resulting path that ends with the given
+ * path. Where the given path has a root component then resolution is highly implementation dependent and therefore
+ * unspecified.
+ *
+ * @param path the path to resolve against this path
+ * @return the resulting path
+ */
+ @Override
+ public Path resolve(Path path) {
+ if (path.isAbsolute()) {
+ return path;
+ }
+ if (path.getNameCount() == 0) {
+ return this;
+ }
+ return this.parentFileSystem.getPath(this.toString(), path.toString());
+ }
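+
+ /*
+ * Illustrative sketch (added; names hypothetical): resolving a relative path joins it onto this path, while an
+ * absolute argument is returned unchanged.
+ *
+ * Path dir = fs.getPath("mycontainer:/dir");
+ * Path file = dir.resolve("file.txt"); // "mycontainer:/dir/file.txt"
+ * Path abs = dir.resolve(fs.getPath("other:/x")); // "other:/x", already absolute
+ */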
+
+ /**
+ * Converts a given path string to a Path and resolves it against this Path in exactly the manner specified by the
+ * {@link #resolve(Path) resolve} method.
+ *
+ * @param path the path string to resolve against this path
+ * @return the resulting path
+ * @throws InvalidPathException if the path string cannot be converted to a Path.
+ */
+ @Override
+ public Path resolve(String path) {
+ return this.resolve(this.parentFileSystem.getPath(path));
+ }
+
+ /**
+ * Resolves the given path against this path's parent path. This is useful where a file name needs to be replaced
+ * with another file name. For example, suppose that the name separator is "/" and a path represents
+ * "dir1/dir2/foo", then invoking this method with the Path "bar" will result in the Path "dir1/dir2/bar". If this
+ * path does not have a parent path, or other is absolute, then this method returns other. If other is an empty path
+ * then this method returns this path's parent, or where this path doesn't have a parent, the empty path.
+ *
+ * @param path the path to resolve against this path's parent
+ * @return the resulting path
+ */
+ @Override
+ public Path resolveSibling(Path path) {
+ if (path.isAbsolute()) {
+ return path;
+ }
+
+ Path parent = this.getParent();
+ return parent == null ? path : parent.resolve(path);
+ }
+
+ /**
+ * Converts a given path string to a Path and resolves it against this path's parent path in exactly the manner
+ * specified by the resolveSibling method.
+ *
+ * @param path the path string to resolve against this path's parent
+ * @return the resulting path
+ * @throws InvalidPathException if the path string cannot be converted to a Path.
+ */
+ @Override
+ public Path resolveSibling(String path) {
+ return this.resolveSibling(this.parentFileSystem.getPath(path));
+ }
+
+ /**
+ * Constructs a relative path between this path and a given path.
+ *
+ * Relativization is the inverse of resolution. This method attempts to construct a relative path that when resolved
+ * against this path, yields a path that locates the same file as the given path.
+ *
+ * A relative path cannot be constructed if only one of the paths has a root component. If both paths have a root
+ * component, it is still possible to relativize one against the other. If this path and the given path are equal
+ * then an empty path is returned.
+ *
+ * For any two normalized paths p and q, where q does not have a root component,
+ * {@code p.relativize(p.resolve(q)).equals(q)}
+ *
+ * @param path the path to relativize against this path
+ * @return the resulting relative path, or an empty path if both paths are equal
+ * @throws IllegalArgumentException if other is not a Path that can be relativized against this path
+ */
+ @Override
+ public Path relativize(Path path) {
+ if (path.getRoot() == null ^ this.getRoot() == null) {
+ throw LoggingUtility.logError(LOGGER,
+ new IllegalArgumentException("Both paths must be absolute or neither can be"));
+ }
+
+ AzurePath thisNormalized = (AzurePath) this.normalize();
+ Path otherNormalized = path.normalize();
+
+ Deque
+ * This method constructs an absolute URI with a scheme equal to the URI scheme that identifies the provider.
+ *
+ * No authority component is defined for the {@code URI} returned by this method. This implementation offers the
+ * same equivalence guarantee as the default provider.
+ *
+ * @return the URI representing this path
+ * @throws SecurityException never
+ */
+ @Override
+ public URI toUri() {
+ try {
+ return new URI(this.parentFileSystem.provider().getScheme(), null, "/" + this.toAbsolutePath(),
+ null, null);
+ } catch (URISyntaxException e) {
+ throw LoggingUtility.logError(LOGGER, new IllegalStateException("Unable to create valid URI from path", e));
+ }
+ }
+
+ /**
+ * Returns a Path object representing the absolute path of this path.
+ *
+ * If this path is already absolute then this method simply returns this path. Otherwise, this method resolves the
+ * path against the default directory.
+ *
+ * @return a Path object representing the absolute path
+ * @throws SecurityException never
+ */
+ @Override
+ public Path toAbsolutePath() {
+ if (this.isAbsolute()) {
+ return this;
+ }
+ return this.parentFileSystem.getDefaultDirectory().resolve(this);
+ }
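
A small sketch of the relative-to-absolute behavior (the default directory shown is a hypothetical example):

    static Path absolutize(FileSystem fs) {
        Path rel = fs.getPath("dir/file.txt"); // no root component
        return rel.toAbsolutePath();           // resolved against the file system's default directory,
                                               // e.g. "mycontainer:/dir/file.txt"
    }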
+
+ /**
+ * Unsupported.
+ *
+ * @param linkOptions options
+ * @return the real path
+ * @throws UnsupportedOperationException operation not supported.
+ */
+ @Override
+ public Path toRealPath(LinkOption... linkOptions) throws IOException {
+ throw new UnsupportedOperationException("Symbolic links are not supported.");
+ }
+
+ /**
+ * Unsupported.
+ *
+ * @return the file
+ * @throws UnsupportedOperationException operation not supported.
+ */
+ @Override
+ public File toFile() {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Unsupported.
+ *
+ * @param watchService watchService
+ * @param kinds kinds
+ * @param modifiers modifiers
+ * @return the watch key
+ * @throws UnsupportedOperationException operation not supported.
+ */
+ @Override
+ public WatchKey register(WatchService watchService, WatchEvent.Kind<?>[] kinds, WatchEvent.Modifier... modifiers)
+ throws IOException {
+ throw new UnsupportedOperationException("WatchEvents are not supported.");
+ }
+
+ /**
+ * Unsupported.
+ *
+ * @param watchService watchService
+ * @param kinds kinds
+ * @return the watch key
+ * @throws UnsupportedOperationException operation not supported.
+ */
+ @Override
+ public WatchKey register(WatchService watchService, WatchEvent.Kind<?>... kinds) throws IOException {
+ throw new UnsupportedOperationException("WatchEvents are not supported.");
+ }
+
+ /**
+ * Returns an iterator over the name elements of this path.
+ *
+ * The first element returned by the iterator represents the name element that is closest to the root in the
+ * directory hierarchy, the second element is the next closest, and so on. The last element returned is the name of
+ * the file or directory denoted by this path. The root component, if present, is not returned by the iterator.
+ *
+ * @return an iterator over the name elements of this path.
+ */
+ @Override
+ public Iterator<Path> iterator() {
+ if (this.isRoot()) {
+ return Collections.emptyIterator();
+ }
+
+ return Arrays.asList((Path[]) Stream.of(this.splitToElements(this.withoutRoot()))
+ .map(s -> this.parentFileSystem.getPath(s))
+ .toArray(Path[]::new))
+ .iterator();
+ }
+
+ /**
+ * Compares two abstract paths lexicographically. This method does not access the file system and neither file is
+ * required to exist.
+ *
+ * This method may not be used to compare paths that are associated with different file system providers.
+ *
+ * The result of this method is identical to a string comparison on the underlying path strings.
+ *
+ * @return zero if the argument is equal to this path, a value less than zero if this path is lexicographically less
+ * than the argument, or a value greater than zero if this path is lexicographically greater than the argument
+ * @throws ClassCastException if the paths are associated with different providers
+ */
+ @Override
+ public int compareTo(Path path) {
+ if (!(path instanceof AzurePath)) {
+ throw LoggingUtility.logError(LOGGER, new ClassCastException("Other path is not an instance of "
+ + "AzurePath."));
+ }
+
+ return this.pathString.compareTo(((AzurePath) path).pathString);
+ }
+
+ /**
+ * Returns the string representation of this path.
+ *
+ * If this path was created by converting a path string using the getPath method then the path string returned by
+ * this method may differ from the original String used to create the path.
+ *
+ * The returned path string uses the default name separator to separate names in the path.
+ *
+ * @return the string representation of this path
+ */
+ @Override
+ public String toString() {
+ return this.pathString;
+ }
+
+ /**
+ * A path is considered equal to another path if it is associated with the same file system instance and if the
+ * path strings are equivalent.
+ *
+ * @return true if, and only if, the given object is a Path that is identical to this Path
+ */
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ AzurePath paths = (AzurePath) o;
+ return Objects.equals(parentFileSystem, paths.parentFileSystem)
+ && Objects.equals(pathString, paths.pathString);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(parentFileSystem, pathString);
+ }
+
+ /**
+ * Returns a {@link BlobClient} which references a blob pointed to by this path. Note that this does not guarantee
+ * the existence of the blob at this location.
+ *
+ * @return a {@link BlobClient}.
+ * @throws IOException If the path only contains a root component or is empty
+ */
+ public BlobClient toBlobClient() throws IOException {
+ /*
+ We don't store the blob client because unlike other types in this package, a Path does not actually indicate the
+ existence or even validity of any remote resource. It is purely a representation of a path. Therefore, we do not
+ construct the client or perform any validation until it is requested.
+ */
+ // Converting to an absolute path ensures there is a container to operate on even if it is the default.
+ // Normalizing ensures the path is clean.
+ Path root = this.normalize().toAbsolutePath().getRoot();
+ if (root == null) {
+ throw LoggingUtility.logError(LOGGER,
+ new IllegalStateException("Root should never be null after calling toAbsolutePath."));
+ }
+ String fileStoreName = this.rootToFileStore(root.toString());
+
+ BlobContainerClient containerClient =
+ ((AzureFileStore) this.parentFileSystem.getFileStore()).getContainerClient();
+
+ String blobName = this.withoutRoot();
+ if (blobName.isEmpty()) {
+ throw LoggingUtility.logError(LOGGER, new IOException("Cannot get a blob client to a path that only "
+ + "contains the root or is an empty path"));
+ }
+
+ return containerClient.getBlobClient(blobName);
+ }
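
A sketch of dropping down to the underlying SDK client (names hypothetical); as the javadoc notes, the path itself never implies the blob exists, so existence must be checked against the service:

    static boolean blobExists(FileSystem fs) throws IOException {
        AzurePath path = (AzurePath) fs.getPath("mycontainer:/dir/file.txt");
        BlobClient client = path.toBlobClient();
        return client.exists(); // network call; the Path alone guarantees nothing
    }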
+
+ /**
+ * A utility method to conveniently convert from a URL to a storage resource to an {@code AzurePath} pointing to the
+ * same resource.
+ *
+ * The url must be well formatted. There must be an open filesystem corresponding to the account which contains the
+ * blob. Otherwise, a {@link java.nio.file.FileSystemNotFoundException} will be thrown.
+ *
+ * The url may point to either an account, container, or blob. If it points to an account, the path will be empty,
+ * but it will have an internal reference to the file system containing it, meaning instance methods may be
+ * performed on the path to construct a reference to another object. If it points to a container, there will be one
+ * element, which is the root element. Everything after the container, that is the blob name, will then be appended
+ * after the root element.
+ *
+ * IP-style URLs are not currently supported.
+ *
+ * The {@link AzureFileSystemProvider} can typically be obtained via {@link AzureFileSystem#provider()}.
+ *
+ * @param provider The installed {@link AzureFileSystemProvider} that manages open file systems for this JVM.
+ * @param url The url to the desired resource.
+ * @return An {@link AzurePath} which points to the resource identified by the url.
+ * @throws URISyntaxException If the url contains elements which are not well formatted.
+ */
+ public static AzurePath fromBlobUrl(AzureFileSystemProvider provider, String url) throws URISyntaxException {
+ BlobUrlParts parts = BlobUrlParts.parse(url);
+ URI fileSystemUri = hostToFileSystemUri(provider, parts.getScheme(), parts.getHost());
+ FileSystem parentFileSystem = provider.getFileSystem(fileSystemUri);
+ return new AzurePath((AzureFileSystem) parentFileSystem, fileStoreToRoot(parts.getBlobContainerName()),
+ parts.getBlobName() == null ? "" : parts.getBlobName());
+ }
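
A usage sketch (the account, container, and blob names are hypothetical); a file system for the account must already be open, otherwise a FileSystemNotFoundException is thrown:

    static AzurePath pathFromUrl(AzureFileSystemProvider provider) throws URISyntaxException {
        return AzurePath.fromBlobUrl(provider,
            "https://myaccount.blob.core.windows.net/mycontainer/dir/file.txt");
        // -> the path "mycontainer:/dir/file.txt" on the open file system for that account
    }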
+
+ /**
+ * @return Whether this path consists of only a root component.
+ */
+ boolean isRoot() {
+ return this.equals(this.getRoot());
+ }
+
+ private String withoutRoot() {
+ Path root = this.getRoot();
+ String str = this.pathString;
+ if (root != null) {
+ str = this.pathString.substring(root.toString().length());
+ }
+ if (str.startsWith(this.parentFileSystem.getSeparator())) {
+ str = str.substring(1);
+ }
+
+ return str;
+ }
+
+ private String[] splitToElements() {
+ return this.splitToElements(this.pathString);
+ }
+
+ private String[] splitToElements(String str) {
+ String[] arr = str.split(this.parentFileSystem.getSeparator());
+ /*
+ This is a special case where we split after removing the root from a path that is just the root. Or otherwise
+ have an empty path.
+ */
+ if (arr.length == 1 && arr[0].isEmpty()) {
+ return new String[0];
+ }
+ return arr;
+ }
+
+ private String rootToFileStore(String root) {
+ return root.substring(0, root.length() - 1); // Remove the ROOT_DIR_SUFFIX
+ }
+
+ private static String fileStoreToRoot(String fileStore) {
+ if (fileStore == null || "".equals(fileStore)) {
+ return "";
+ }
+ return fileStore + ROOT_DIR_SUFFIX;
+ }
+
+ private static URI hostToFileSystemUri(AzureFileSystemProvider provider, String scheme, String host)
+ throws URISyntaxException {
+ return new URI(provider.getScheme() + "://?endpoint=" + scheme + "://" + host);
+ }
+
+ static void ensureFileSystemOpen(Path p) {
+ if (!p.getFileSystem().isOpen()) {
+ throw LoggingUtility.logError(LOGGER, new ClosedFileSystemException());
+ }
+ }
+}
diff --git a/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureResource.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureResource.java
new file mode 100644
index 00000000000..92fb14a62cc
--- /dev/null
+++ b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureResource.java
@@ -0,0 +1,284 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob.nio;
+
+import com.azure.core.util.logging.ClientLogger;
+import com.azure.storage.blob.BlobContainerClient;
+import com.azure.storage.blob.BlobClient;
+import com.azure.storage.blob.BlobContainerClientBuilder;
+import com.azure.storage.blob.models.BlobHttpHeaders;
+import com.azure.storage.blob.models.BlobItem;
+import com.azure.storage.blob.models.BlobListDetails;
+import com.azure.storage.blob.models.BlobProperties;
+import com.azure.storage.blob.models.BlobRequestConditions;
+import com.azure.storage.blob.models.BlobStorageException;
+import com.azure.storage.blob.models.ListBlobsOptions;
+import com.azure.storage.blob.models.ParallelTransferOptions;
+import com.azure.storage.blob.options.BlockBlobOutputStreamOptions;
+import com.azure.storage.blob.specialized.BlobOutputStream;
+import com.azure.storage.common.implementation.Constants;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.attribute.FileAttribute;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * This type is meant to be a logical grouping of operations and data associated with an Azure resource. It is NOT
+ * intended to serve as a local cache for any data related to remote resources. It is agnostic to whether the resource
+ * is a directory or a file and will not perform any validation of the resource type, though root directories are not
+ * supported as they are backed by containers and do not support many file system APIs.
+ *
+ * It also serves as the interface to Storage clients. Any operation that needs to use a client should first build an
+ * AzureResource using a path and then use the getter to access the client.
+ */
+final class AzureResource {
+ private static final ClientLogger LOGGER = new ClientLogger(AzureResource.class);
+
+ static final String DIR_METADATA_MARKER = Constants.HeaderConstants.DIRECTORY_METADATA_KEY;
+
+ private final AzurePath path;
+ private final BlobClient blobClient;
+
+ // The following are not kept consistent with the service. They are only held here between parsing and putting.
+ private BlobHttpHeaders blobHeaders;
+ private Map<String, String> blobMetadata;
diff --git a/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureSeekableByteChannel.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureSeekableByteChannel.java
new file mode 100644
--- /dev/null
+++ b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureSeekableByteChannel.java
+/**
+ * A byte channel that maintains a current position.
+ *
+ * A channel may only be opened in read mode OR write mode. It may not be opened in read/write mode. Seeking is
+ * supported for reads, but not for writes. Modifying existing files is not permitted; only creating new files or
+ * overwriting existing files is supported.
+ *
+ * This type is not threadsafe to prevent having to hold locks across network calls.
+ */
+public final class AzureSeekableByteChannel implements SeekableByteChannel {
+ private static final ClientLogger LOGGER = new ClientLogger(AzureSeekableByteChannel.class);
+
+ private final NioBlobInputStream reader;
+ private final NioBlobOutputStream writer;
+ private long position;
+ private boolean closed = false;
+ private final Path path;
+ /*
+ If this type needs to be made threadsafe, closed should be volatile. We need to add a lock to guard updates to
position or make it an AtomicLong. If we have a lock, we have to be careful about holding it while doing IO ops and at
+ least ensure timeouts are set. We probably have to duplicate or copy the buffers for at least writing to ensure they
+ don't get overwritten.
+ */
+
+ AzureSeekableByteChannel(NioBlobInputStream inputStream, Path path) {
+ this.reader = inputStream;
+ /*
+ We mark at the beginning (we always construct a stream to the beginning of the blob) to support seeking. We can
+ effectively seek anywhere by always marking at the beginning of the blob and then a seek is resetting to that
+ mark and skipping.
+ */
+ inputStream.mark(Integer.MAX_VALUE);
+ this.writer = null;
+ this.position = 0;
+ this.path = path;
+ }
+
+ AzureSeekableByteChannel(NioBlobOutputStream outputStream, Path path) {
+ this.writer = outputStream;
+ this.reader = null;
+ this.position = 0;
+ this.path = path;
+ }
+
+ @Override
+ public int read(ByteBuffer dst) throws IOException {
+ AzurePath.ensureFileSystemOpen(this.path);
+ validateOpen();
+ validateReadMode();
+
+ // See comments in position(), remember that position is 0-based and size() is exclusive
+ if (this.position >= this.size()) {
+ return -1; // at or past EOF
+ }
+
+ // If the buffer is backed by an array, we can write directly to that instead of allocating new memory.
+ int pos;
+ final int limit;
+ final byte[] buf;
+ if (dst.hasArray()) {
+ // ByteBuffer has a position and limit that define the bounds of the writeable area, and that
+ // area can be both smaller than the backing array and might not begin at array index 0.
+ pos = dst.position();
+ limit = pos + dst.remaining();
+ buf = dst.array();
+ } else {
+ pos = 0;
+ limit = dst.remaining();
+ buf = new byte[limit];
+ }
+
+ while (pos < limit) {
+ int byteCount = this.reader.read(buf, pos, limit - pos);
+ if (byteCount == -1) {
+ break;
+ }
+ pos += byteCount;
+ }
+
+ /*
+ Either write to the destination if we had to buffer separately or just set the position correctly if we wrote
+ underneath the buffer
+ */
+ int count;
+ if (dst.hasArray()) {
+ count = pos - dst.position();
+ dst.position(pos);
+ } else {
+ count = pos; // original position was 0
+ dst.put(buf, 0, count);
+ }
+
+ this.position += count;
+ return count;
+ }
+
+ @Override
+ public int write(ByteBuffer src) throws IOException {
+ AzurePath.ensureFileSystemOpen(this.path);
+ validateOpen();
+ validateWriteMode();
+
+ final int length = src.remaining();
+ this.position += length;
+
+ /*
+ If the buffer is backed by an array, we can read directly from that instead of allocating new memory.
+ Set the position correctly if we read from underneath the buffer
+ */
+ int pos;
+ byte[] buf;
+ if (src.hasArray()) {
+ // ByteBuffer has a position and limit that define the bounds of the readable area, and that
+ // area can be both smaller than the backing array and might not begin at array index 0.
+ pos = src.position();
+ buf = src.array();
+ src.position(pos + length);
+ } else {
+ pos = 0;
+ buf = new byte[length];
+ src.get(buf); // advances src.position()
+ }
+ // Either way, src.position() and this.position have been updated before we know whether this write
+ // will succeed (preserving the original behavior). It may be better to update the positions only *after*
+ // success, but then on an IOException we could not tell whether a partial write occurred, nor how large it was.
+ this.writer.write(buf, pos, length);
+ return length;
+ }
+
+ @Override
+ public long position() throws IOException {
+ AzurePath.ensureFileSystemOpen(this.path);
+ validateOpen();
+
+ return this.position;
+ }
+
+ @Override
+ public AzureSeekableByteChannel position(long newPosition) throws IOException {
+ AzurePath.ensureFileSystemOpen(this.path);
+ validateOpen();
+ validateReadMode();
+
+ if (newPosition < 0) {
+ throw LoggingUtility.logError(LOGGER, new IllegalArgumentException("Seek position cannot be negative"));
+ }
+
+ /*
+ The javadoc says seeking past the end for reading is legal and that it should indicate the end of the file on
+ the next read. StorageInputStream doesn't allow this, but we can get around that by modifying the
+ position variable and skipping the actual read (when read is called next); we'll check in read if we've seeked
+ past the end and short circuit there as well.
+
+ Because we are in read mode this will always give us the size from properties.
+ */
+ if (newPosition > this.size()) {
+ this.position = newPosition;
+ return this;
+ }
+ this.reader.reset(); // Because we always mark at the beginning, this will reset us back to the beginning.
+ this.reader.mark(Integer.MAX_VALUE);
+ long skipAmount = this.reader.skip(newPosition);
+ if (skipAmount < newPosition) {
+ throw new IOException("Could not set desired position");
+ }
+ this.position = newPosition;
+
+ return this;
+ }
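
A sketch of the read-mode seek semantics described above (the path is assumed to reference an existing blob on an open file system from this provider):

    static int seekDemo(Path blobPath) throws IOException {
        try (SeekableByteChannel ch = Files.newByteChannel(blobPath, StandardOpenOption.READ)) {
            ch.position(1024);           // reset to the mark at 0, then skip 1024 bytes
            ch.position(ch.size() + 10); // legal: position is recorded, but the next read returns -1
            return ch.read(ByteBuffer.allocate(16)); // -1, end-of-file
        }
    }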
+
+ @Override
+ public long size() throws IOException {
+ AzurePath.ensureFileSystemOpen(this.path);
+ validateOpen();
+
+ /*
+ If we are in read mode, the size is the size of the file.
+ If we are in write mode, the size is the amount of data written so far.
+ */
+ if (reader != null) {
+ return reader.getBlobInputStream().getProperties().getBlobSize();
+ } else {
+ return position;
+ }
+ }
+
+ @Override
+ public AzureSeekableByteChannel truncate(long size) throws IOException {
+ throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException());
+ }
+
+ @Override
+ public boolean isOpen() {
+ AzurePath.ensureFileSystemOpen(this.path);
+ return !this.closed;
+ }
+
+ @Override
+ public void close() throws IOException {
+ AzurePath.ensureFileSystemOpen(this.path);
+ if (this.reader != null) {
+ this.reader.close();
+ } else {
+ this.writer.close();
+ }
+ this.closed = true;
+ }
+
+ Path getPath() {
+ return this.path;
+ }
+
+ private void validateOpen() throws ClosedChannelException {
+ if (this.closed) {
+ throw LoggingUtility.logError(LOGGER, new ClosedChannelException());
+ }
+ }
+
+ private void validateReadMode() {
+ if (this.reader == null) {
+ throw LoggingUtility.logError(LOGGER, new NonReadableChannelException());
+ }
+ }
+
+ private void validateWriteMode() {
+ if (this.writer == null) {
+ throw LoggingUtility.logError(LOGGER, new NonWritableChannelException());
+ }
+ }
+}
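
A hedged end-to-end sketch of the read-OR-write restriction (container and blob names hypothetical): one channel is opened for writing to create the blob, and a separate channel is opened for reading it back.

    static String roundTrip(FileSystem fs) throws IOException {
        Path blob = fs.getPath("mycontainer:/dir/greeting.txt");
        try (SeekableByteChannel w = Files.newByteChannel(blob,
                StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) {
            w.write(ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8)));
        }
        try (SeekableByteChannel r = Files.newByteChannel(blob, StandardOpenOption.READ)) {
            ByteBuffer buf = ByteBuffer.allocate(5);
            r.read(buf);
            buf.flip();
            return StandardCharsets.UTF_8.decode(buf).toString(); // "hello"
        }
    }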
diff --git a/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/DirectoryStatus.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/DirectoryStatus.java
new file mode 100644
index 00000000000..8356a7ebeb1
--- /dev/null
+++ b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/DirectoryStatus.java
@@ -0,0 +1,23 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob.nio;
+
+/**
+ * RESERVED FOR INTERNAL USE.
+ *
+ * An enum to indicate the status of a directory.
+ */
+enum DirectoryStatus {
+ EMPTY, // The directory at least weakly exists and is empty.
+
+ NOT_EMPTY, // The directory at least weakly exists and has one or more children.
+
+ DOES_NOT_EXIST, // There is no resource at this path.
+
+ NOT_A_DIRECTORY; // A resource exists at this path, but it is not a directory.
+
+ static boolean isDirectory(DirectoryStatus status) {
+ return EMPTY.equals(status) || NOT_EMPTY.equals(status);
+ }
+}
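
For illustration, a delete-style precondition check a caller might build on this enum (a sketch; the exception choices here are assumptions, not this package's actual delete logic):

    static void requireEmptyDirectory(DirectoryStatus status, Path path) throws IOException {
        if (!DirectoryStatus.isDirectory(status)) {
            throw new NotDirectoryException(path.toString());
        }
        if (status == DirectoryStatus.NOT_EMPTY) {
            throw new DirectoryNotEmptyException(path.toString());
        }
    }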
diff --git a/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/LoggingUtility.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/LoggingUtility.java
new file mode 100644
index 00000000000..3cd503f98c2
--- /dev/null
+++ b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/LoggingUtility.java
@@ -0,0 +1,16 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob.nio;
+
+import com.azure.core.util.logging.ClientLogger;
+
+/**
+ * A minimal utility class to get around a shortcoming in Core's logging.
+ */
+final class LoggingUtility {
+ public static <T extends Exception> T logError(ClientLogger logger, T e) {
+ logger.error(e.getMessage());
+ return e;
+ }
+}
+
+ /**
+ * Tells whether this file store supports the file attributes identified by the given file attribute view.
+ *
+ * @param type the file attribute view type
+ * @return Whether the file attribute view is supported.
+ */
+ @Override
+ public boolean supportsFileAttributeView(Class<? extends FileAttributeView> type) {
+ return AzureFileSystem.SUPPORTED_ATTRIBUTE_VIEWS.containsKey(type);
+ }
+
+ /**
+ * Tells whether this file store supports the file attributes identified by the given file attribute view.
+ *
+ * @param name the name of the file attribute view
+ * @return whether the file attribute view is supported.
+ */
+ @Override
+ public boolean supportsFileAttributeView(String name) {
+ return AzureFileSystem.SUPPORTED_ATTRIBUTE_VIEWS.containsValue(name);
+ }
+
+ /**
+ * Returns a FileStoreAttributeView of the given type.
+ *
+ */
+ @Override
+ public <V extends FileStoreAttributeView> V getFileStoreAttributeView(Class<V> type) {
+ return null;
+ }
+
+ /**
+ * Creates a new directory at the specified path.
+ *
+ * The file attributes may specify blob HTTP headers (such as Content-Type) and metadata to apply to the new blob.
+ * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
+ * words, if any of the above is set, all those that are not set will be cleared. See the
+ * Azure Docs for more
+ * information.
+ *
+ * @param path the directory to create
+ * @param fileAttributes an optional list of file attributes to set atomically when creating the directory
+ * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
+ * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when
+ * creating the directory
+ * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name
+ * already exists
+ * @throws IOException If an I/O error occurs.
+ * @throws SecurityException never
+ */
+ @Override
+ public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
+ fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
+
+ // Get the destination for the directory. Will throw if path is a root.
+ AzureResource azureResource = new AzureResource(path);
+ AzurePath.ensureFileSystemOpen(azureResource.getPath());
+
+ // Check if parent exists. If it does, atomically check if a file already exists and create a new dir if not.
+ if (azureResource.checkParentDirectoryExists()) {
+ try {
+ azureResource.setFileAttributes(Arrays.asList(fileAttributes))
+ .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*"));
+ } catch (BlobStorageException e) {
+ if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
+ && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new FileAlreadyExistsException(azureResource.getPath().toString()));
+ } else {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new IOException("An error occurred when creating the directory", e));
+ }
+ }
+ } else {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new IOException("Parent directory does not exist for path: " + azureResource.getPath()));
+ }
+ }
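
Through the standard API this surfaces as Files.createDirectory (a sketch; the names are hypothetical, and per the semantics above the parent must already exist at least weakly):

    static Path makeLogsDir(FileSystem fs) throws IOException {
        Path dir = fs.getPath("mycontainer:/logs/2024");
        return Files.createDirectory(dir); // FileAlreadyExistsException if something is already there
    }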
+
+ /**
+ * Deletes the specified resource.
+ */
+
+ /**
+ * Returns an estimate of the number of bytes that can be read (or skipped over) from this input stream without
+ * blocking by the next invocation of a method for this input stream.
+ *
+ * @return An {@code int} which represents an estimate of the number of bytes that can be read (or skipped
+ * over) from this input stream without blocking, or 0 when it reaches the end of the input stream.
+ */
+ @Override
+ public synchronized int available() throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ return this.blobInputStream.available();
+ }
+
+ /**
+ * Closes this input stream and releases any system resources associated with the stream.
+ */
+ @Override
+ public synchronized void close() throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ this.blobInputStream.close();
+ }
+
+ /**
+ * Marks the current position in this input stream. A subsequent call to the reset method repositions this stream at
+ * the last marked position so that subsequent reads re-read the same bytes.
+ *
+ * @param readlimit An {@code int} which represents the maximum limit of bytes that can be read before the mark
+ * position becomes invalid.
+ */
+ @Override
+ public synchronized void mark(final int readlimit) {
+ this.blobInputStream.mark(readlimit);
+ }
+
+ /**
+ * Tests if this input stream supports the mark and reset methods.
+ *
+ * @return {@code true} if this stream supports the mark and reset methods; {@code false} otherwise.
+ */
+ @Override
+ public boolean markSupported() {
+ return this.blobInputStream.markSupported();
+ }
+
+ /**
+ * Reads the next byte of data from the input stream. The value byte is returned as an int in the range 0 to 255. If
+ * no byte is available because the end of the stream has been reached, the value -1 is returned. This method blocks
+ * until input data is available, the end of the stream is detected, or an exception is thrown.
+ *
+ * @return An {@code int} which represents the next byte of data read, or -1 if there is no more data because the
+ * end of the stream has been reached.
+ * @throws IOException If an I/O error occurs.
+ */
+ @Override
+ public int read() throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ try {
+ return this.blobInputStream.read();
+ /*
+ BlobInputStream only throws RuntimeException, and it doesn't preserve the cause, it only takes the message,
+ so we can't do any better than re-wrapping it in an IOException.
+ */
+ } catch (RuntimeException e) {
+ throw LoggingUtility.logError(LOGGER, new IOException(e));
+ }
+ }
+
+ /**
+ * Reads some number of bytes from the input stream and stores them into the buffer array {@code b}. The number
+ * of bytes actually read is returned as an integer. This method blocks until input data is available, end of
+ * file is detected, or an exception is thrown. If the length of {@code b} is zero, then no bytes are read and 0
+ * is returned; otherwise, there is an attempt to read at least one byte. If no byte is available because the
+ * stream is at the end of the file, the value -1 is returned; otherwise, at least one byte is read and stored
+ * into {@code b}.
+ *
+ * The first byte read is stored into element {@code b[0]}, the next one into {@code b[1]}, and so on. The number
+ * of bytes read is, at most, equal to the length of {@code b}. Let {@code k} be the number of bytes actually
+ * read; these bytes will be stored in elements {@code b[0]} through {@code b[k-1]}, leaving elements
+ * {@code b[k]} through {@code b[b.length-1]} unaffected.
+ *
+ * The {@code read(b)} method for class {@link InputStream} has the same effect as {@code read(b, 0, b.length)}.
+ *
+ * @param b A {@code byte} array which represents the buffer into which the data is read.
+ * @return An {@code int} which represents the total number of bytes read into the buffer, or -1 if there is no
+ * more data because the end of the stream has been reached.
+ * @throws IOException If the first byte cannot be read for any reason other than the end of the file, if the
+ * input stream has been closed, or if some other I/O error occurs.
+ * @throws NullPointerException If the {@code byte} array {@code b} is null.
+ */
+ @Override
+ public int read(final byte[] b) throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ try {
+ return this.blobInputStream.read(b);
+ } catch (RuntimeException e) {
+ throw LoggingUtility.logError(LOGGER, new IOException(e));
+ }
+ }
+
+ /**
+ * Reads up to {@code len} bytes of data from the input stream into an array of bytes. An attempt is made to
+ * read as many as {@code len} bytes, but a smaller number may be read. The number of bytes actually read is
+ * returned as an integer. This method blocks until input data is available, end of file is detected, or an
+ * exception is thrown.
+ *
+ * If {@code len} is zero, then no bytes are read and 0 is returned; otherwise, there is an attempt to read at
+ * least one byte. If no byte is available because the stream is at end of file, the value -1 is returned;
+ * otherwise, at least one byte is read and stored into {@code b}.
+ *
+ * The first byte read is stored into element {@code b[off]}, the next one into {@code b[off+1]}, and so on.
+ * The number of bytes read is, at most, equal to {@code len}. Let {@code k} be the number of bytes actually
+ * read; these bytes will be stored in elements {@code b[off]} through {@code b[off+k-1]}, leaving elements
+ * {@code b[off+k]} through {@code b[off+len-1]} unaffected.
+ *
+ * In every case, elements {@code b[0]} through {@code b[off]} and elements {@code b[off+len]} through
+ * {@code b[b.length-1]} are unaffected.
+ *
+ * @param b A {@code byte} array which represents the buffer into which the data is read.
+ * @param off An {@code int} which represents the start offset in the {@code byte} array at which the data
+ * is written.
+ * @param len An {@code int} which represents the maximum number of bytes to read.
+ * @return An {@code int} which represents the total number of bytes read into the buffer, or -1 if there is no
+ * more data because the end of the stream has been reached.
+ * @throws IOException If the first byte cannot be read for any reason other than end of file, or if the input
+ * stream has been closed, or if some other I/O error occurs.
+ * @throws NullPointerException If the {@code byte} array {@code b} is null.
+ * @throws IndexOutOfBoundsException If {@code off} is negative, {@code len} is negative, or {@code len} is
+ * greater than {@code b.length - off}.
+ */
+ @Override
+ public int read(final byte[] b, final int off, final int len) throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ if (off < 0 || len < 0 || len > b.length - off) {
+ throw LOGGER.logExceptionAsError(new IndexOutOfBoundsException());
+ }
+ try {
+ return this.blobInputStream.read(b, off, len);
+ } catch (RuntimeException e) {
+ throw LoggingUtility.logError(LOGGER, new IOException(e));
+ }
+ }
+
+ /**
+ * Repositions this stream to the position at the time the mark method was last called on this input stream. Note
+ * repositioning the blob read stream will disable blob MD5 checking.
+ *
+ * @throws IOException If this stream has not been marked or if the mark has been invalidated.
+ */
+ @Override
+ public synchronized void reset() throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ try {
+ this.blobInputStream.reset();
+ } catch (RuntimeException e) {
+ if (e.getMessage().equals("Stream mark expired.")) {
+ throw LoggingUtility.logError(LOGGER, new IOException(e));
+ }
+ throw LoggingUtility.logError(LOGGER, e);
+ }
+ }
+
+ /**
+ * Skips over and discards n bytes of data from this input stream. The skip method may, for a variety of reasons,
+ * end up skipping over some smaller number of bytes, possibly 0. This may result from any of a number of
+ * conditions; reaching end of file before n bytes have been skipped is only one possibility. The actual number of
+ * bytes skipped is returned. If n is negative, no bytes are skipped.
+ *
+ * Note repositioning the blob read stream will disable blob MD5 checking.
+ *
+ * @param n A {@code long} which represents the number of bytes to skip.
+ */
+ @Override
+ public synchronized long skip(final long n) throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ return this.blobInputStream.skip(n);
+ }
+
+ BlobInputStream getBlobInputStream() {
+ return blobInputStream;
+ }
+}
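
A usage sketch relying on the mark/reset support documented above (it assumes Files.newInputStream on this provider hands back a NioBlobInputStream; the path is hypothetical):

    static byte[] peekHeader(Path blobPath) throws IOException {
        try (InputStream in = Files.newInputStream(blobPath)) {
            in.mark(16);             // remember the current position
            byte[] header = new byte[16];
            int n = in.read(header); // may read fewer than 16 bytes
            in.reset();              // rewind so callers can re-read from the start
            return Arrays.copyOf(header, Math.max(n, 0));
        }
    }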
diff --git a/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/NioBlobOutputStream.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/NioBlobOutputStream.java
new file mode 100644
index 00000000000..ae5c0fa02b1
--- /dev/null
+++ b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/NioBlobOutputStream.java
@@ -0,0 +1,99 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob.nio;
+
+import com.azure.core.util.logging.ClientLogger;
+import com.azure.storage.blob.specialized.BlobOutputStream;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.file.Path;
+
+/**
+ * Provides an OutputStream to write to a file stored as an Azure Blob.
+ */
+public final class NioBlobOutputStream extends OutputStream {
+ private static final ClientLogger LOGGER = new ClientLogger(NioBlobOutputStream.class);
+
+ private final BlobOutputStream blobOutputStream;
+ private final Path path;
+
+ NioBlobOutputStream(BlobOutputStream blobOutputStream, Path path) {
+ this.blobOutputStream = blobOutputStream;
+ this.path = path;
+ }
+
+ @Override
+ public synchronized void write(int i) throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ try {
+ this.blobOutputStream.write(i);
+ /*
+ BlobOutputStream only throws RuntimeException, and it doesn't preserve the cause, it only takes the message,
+ so we can't do any better than re-wrapping it in an IOException.
+ */
+ } catch (RuntimeException e) {
+ throw LoggingUtility.logError(LOGGER, new IOException(e));
+ }
+ }
+
+ @Override
+ public synchronized void write(byte[] b) throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ try {
+ this.blobOutputStream.write(b);
+ /*
+ BlobOutputStream only throws RuntimeException, and it doesn't preserve the cause, it only takes the message,
+ so we can't do any better than re-wrapping it in an IOException.
+ */
+ } catch (RuntimeException e) {
+ throw LoggingUtility.logError(LOGGER, new IOException(e));
+ }
+ }
+
+ @Override
+ public synchronized void write(byte[] b, int off, int len) throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ try {
+ this.blobOutputStream.write(b, off, len);
+ /*
+ BlobOutputStream only throws RuntimeException, and it doesn't preserve the cause, it only takes the message,
+ so we can't do any better than re-wrapping it in an IOException.
+ */
+ } catch (RuntimeException e) {
+ if (e instanceof IndexOutOfBoundsException) {
+ throw LoggingUtility.logError(LOGGER, e);
+ }
+ throw LoggingUtility.logError(LOGGER, new IOException(e));
+ }
+ }
+
+ @Override
+ public synchronized void flush() throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ try {
+ this.blobOutputStream.flush();
+ /*
+ BlobOutputStream only throws RuntimeException, and it doesn't preserve the cause, it only takes the message,
+ so we can't do any better than re-wrapping it in an IOException.
+ */
+ } catch (RuntimeException e) {
+ throw LoggingUtility.logError(LOGGER, new IOException(e));
+ }
+ }
+
+ @Override
+ public synchronized void close() throws IOException {
+ AzurePath.ensureFileSystemOpen(path);
+ try {
+ this.blobOutputStream.close();
+ /*
+ BlobOutputStream only throws RuntimeException, and it doesn't preserve the cause, it only takes the message,
+ so we can't do any better than re-wrapping it in an IOException.
+ */
+ } catch (RuntimeException e) {
+ throw LoggingUtility.logError(LOGGER, new IOException(e));
+ }
+ }
+}
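
And the matching write-side sketch (names hypothetical): close() is what commits the blob, and as the code above shows, SDK RuntimeExceptions surface as IOExceptions.

    static void writeGreeting(FileSystem fs) throws IOException {
        Path blob = fs.getPath("mycontainer:/dir/greeting.txt");
        try (OutputStream out = Files.newOutputStream(blob)) { // returns a NioBlobOutputStream
            out.write("hello blob".getBytes(StandardCharsets.UTF_8));
        } // close() flushes and commits; failures are rethrown as IOException
    }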
diff --git a/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/package-info.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/package-info.java
new file mode 100644
index 00000000000..96cd1fbd627
--- /dev/null
+++ b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/package-info.java
@@ -0,0 +1,7 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/**
+ * Package containing the classes for loading the AzureFileSystemProvider based on Azure Storage Blobs.
+ */
+package com.azure.storage.blob.nio;
diff --git a/azure-blob-nio/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider b/azure-blob-nio/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider
new file mode 100644
index 00000000000..5cc2b4ead14
--- /dev/null
+++ b/azure-blob-nio/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider
@@ -0,0 +1 @@
+com.azure.storage.blob.nio.AzureFileSystemProvider
diff --git a/azure-blob-nio/src/main/resources/azure-storage-blob-nio.properties b/azure-blob-nio/src/main/resources/azure-storage-blob-nio.properties
new file mode 100644
index 00000000000..ca812989b4f
--- /dev/null
+++ b/azure-blob-nio/src/main/resources/azure-storage-blob-nio.properties
@@ -0,0 +1,2 @@
+name=${project.artifactId}
+version=${project.version}
diff --git a/azure-blob-nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java b/azure-blob-nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java
new file mode 100644
index 00000000000..6c8c5e06e0b
--- /dev/null
+++ b/azure-blob-nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java
@@ -0,0 +1,129 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+package com.azure.storage.blob.nio;
+
+import com.azure.storage.blob.models.BlobHttpHeaders;
+import com.azure.storage.common.StorageSharedKeyCredential;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.file.FileSystem;
+import java.nio.file.FileSystems;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * WARNING: MODIFYING THIS FILE WILL REQUIRE CORRESPONDING UPDATES TO README.md FILE. LINE NUMBERS
+ * ARE USED TO EXTRACT APPROPRIATE CODE SEGMENTS FROM THIS FILE. ADD NEW CODE AT THE BOTTOM TO AVOID CHANGING
+ * LINE NUMBERS OF EXISTING CODE SAMPLES.
+ *
+ * Code samples for the README.md
+ */
+public class ReadmeSamples {
+
+ private static final String CONTAINER_STORES = "container1,container2"; // A comma-separated list of container names
+ private static final StorageSharedKeyCredential SHARE_KEY_CREDENTIAL
+ = new StorageSharedKeyCredential("